author | Stephen Rothwell <sfr@canb.auug.org.au> | 2017-02-06 11:40:05 +1100
---|---|---
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2017-02-06 11:40:11 +1100
commit | 25902ce7bb4507e29c6a2ae7514ceed9402e12f2 (patch) |
tree | df121f53a4d799b9c55262c82a2b60dadbea9a07 /drivers |
parent | 7a966418480a14af177ed2bdcc0349f9257b27ff (diff) |
parent | bd092ad1463ca0990581fa992e12a9b0ed295d25 (diff) |
Merge remote-tracking branch 'net-next/master'
Diffstat (limited to 'drivers')
763 files changed, 37893 insertions, 17721 deletions
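
Before the raw diffs, one pattern in the drivers/bcma/main.c hunk below is worth a standalone illustration: bcma_device_probe()/bcma_device_remove() now take a reference on the struct device for the lifetime of a successful bind and drop it on probe failure or at remove time. A minimal sketch of that refcount balance, assuming a hypothetical example_driver wrapper in place of bcma's driver type:

/*
 * Hedged sketch of the get_device()/put_device() pattern from the bcma
 * hunk below; example_driver and example_bus_* are illustrative names,
 * not bcma's. Needs <linux/device.h>.
 */
struct example_driver {
	struct device_driver drv;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
};

#define to_example_driver(d) container_of(d, struct example_driver, drv)

static int example_bus_probe(struct device *dev)
{
	struct example_driver *edrv = to_example_driver(dev->driver);
	int err = 0;

	get_device(dev);		/* pin the device while it is bound */
	if (edrv->probe)
		err = edrv->probe(dev);
	if (err)
		put_device(dev);	/* failed bind: drop the reference */
	return err;
}

static int example_bus_remove(struct device *dev)
{
	struct example_driver *edrv = to_example_driver(dev->driver);

	if (edrv->remove)
		edrv->remove(dev);
	put_device(dev);		/* pairs with get_device() in probe */
	return 0;
}

The same bcma hunk also narrows the OF helpers from struct platform_device to struct device, which is why bcma_prepare_core() can pass core->dev.parent straight through to bcma_of_fill_device().
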
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index c53a9dd1353f..623359e407aa 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1779,7 +1779,7 @@ static int eni_do_init(struct atm_dev *dev) printk(")\n"); printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number, eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA", - media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]); + media_name[eni_in(MID_RES_ID_MCON) & DAUGHTER_ID]); error = suni_init(dev); if (error) diff --git a/drivers/atm/midway.h b/drivers/atm/midway.h index 432525ad5e46..d8bec0f2a71c 100644 --- a/drivers/atm/midway.h +++ b/drivers/atm/midway.h @@ -56,7 +56,7 @@ #define MID_CON_SUNI 0x00000040 /* 0: UTOPIA; 1: SUNI */ #define MID_CON_V6 0x00000020 /* 0: non-pipel UTOPIA (required iff !CON_SUNI; 1: UTOPIA */ -#define DAUGTHER_ID 0x0000001f /* daugther board id */ +#define DAUGHTER_ID 0x0000001f /* daughter board id */ /* * Interrupt Status Acknowledge, Interrupt Status & Interrupt Enable diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 2c1798e38abd..12da68ec48ba 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -136,17 +136,17 @@ static bool bcma_is_core_needed_early(u16 core_id) return false; } -static struct device_node *bcma_of_find_child_device(struct platform_device *parent, +static struct device_node *bcma_of_find_child_device(struct device *parent, struct bcma_device *core) { struct device_node *node; u64 size; const __be32 *reg; - if (!parent || !parent->dev.of_node) + if (!parent->of_node) return NULL; - for_each_child_of_node(parent->dev.of_node, node) { + for_each_child_of_node(parent->of_node, node) { reg = of_get_address(node, 0, &size, NULL); if (!reg) continue; @@ -156,7 +156,7 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par return NULL; } -static int bcma_of_irq_parse(struct platform_device *parent, +static int bcma_of_irq_parse(struct device *parent, struct bcma_device *core, struct of_phandle_args *out_irq, int num) { @@ -169,7 +169,7 @@ static int bcma_of_irq_parse(struct platform_device *parent, return rc; } - out_irq->np = parent->dev.of_node; + out_irq->np = parent->of_node; out_irq->args_count = 1; out_irq->args[0] = num; @@ -177,13 +177,13 @@ static int bcma_of_irq_parse(struct platform_device *parent, return of_irq_parse_raw(laddr, out_irq); } -static unsigned int bcma_of_get_irq(struct platform_device *parent, +static unsigned int bcma_of_get_irq(struct device *parent, struct bcma_device *core, int num) { struct of_phandle_args out_irq; int ret; - if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node) + if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node) return 0; ret = bcma_of_irq_parse(parent, core, &out_irq, num); @@ -196,7 +196,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent, return irq_create_of_mapping(&out_irq); } -static void bcma_of_fill_device(struct platform_device *parent, +static void bcma_of_fill_device(struct device *parent, struct bcma_device *core) { struct device_node *node; @@ -227,7 +227,7 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num) return mips_irq <= 4 ? 
mips_irq + 2 : 0; } if (bus->host_pdev) - return bcma_of_get_irq(bus->host_pdev, core, num); + return bcma_of_get_irq(&bus->host_pdev->dev, core, num); return 0; case BCMA_HOSTTYPE_SDIO: return 0; @@ -253,7 +253,8 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) { core->dma_dev = &bus->host_pdev->dev; core->dev.parent = &bus->host_pdev->dev; - bcma_of_fill_device(bus->host_pdev, core); + if (core->dev.parent) + bcma_of_fill_device(core->dev.parent, core); } else { core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; @@ -633,8 +634,11 @@ static int bcma_device_probe(struct device *dev) drv); int err = 0; + get_device(dev); if (adrv->probe) err = adrv->probe(core); + if (err) + put_device(dev); return err; } @@ -647,6 +651,7 @@ static int bcma_device_remove(struct device *dev) if (adrv->remove) adrv->remove(core); + put_device(dev); return 0; } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b3ef47c3ab73..31803b367104 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; struct mlx5_ib_cq *cq = to_mcq(ibcq); - void __iomem *uar_page = mdev->priv.uuari.uars[0].map; + void __iomem *uar_page = mdev->priv.uar->map; unsigned long irq_flags; int ret = 0; @@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) mlx5_cq_arm(&cq->mcq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, - uar_page, - MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock), - to_mcq(ibcq)->mcq.cons_index); + uar_page, to_mcq(ibcq)->mcq.cons_index); return ret; } @@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, MLX5_SET(cqc, cqc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = to_mucontext(context)->uuari.uars[0].index; + *index = to_mucontext(context)->bfregi.sys_pages[0]; if (ucmd.cqe_comp_en == 1) { if (unlikely((*cqe_size != 64) || @@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, MLX5_SET(cqc, cqc, log_page_size, cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - *index = dev->mdev->priv.uuari.uars[0].index; + *index = dev->mdev->priv.uar->index; return 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d566f6738833..9d8535385bb8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -53,6 +53,7 @@ #include <linux/in.h> #include <linux/etherdevice.h> #include <linux/mlx5/fs.h> +#include <linux/mlx5/vport.h> #include "mlx5_ib.h" #define DRIVER_NAME "mlx5_ib" @@ -672,17 +673,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); } - if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, - uhw->outlen)) { - resp.mlx5_ib_support_multi_pkt_send_wqes = - MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); - resp.response_length += - sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); - } - - if (field_avail(typeof(resp), reserved, uhw->outlen)) - resp.response_length += sizeof(resp.reserved); - if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { resp.cqe_comp_caps.max_num = MLX5_CAP_GEN(dev->mdev, cqe_compression) ? 
@@ -706,6 +696,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.packet_pacing_caps); } + if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, + uhw->outlen)) { + resp.mlx5_ib_support_multi_pkt_send_wqes = + MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); + resp.response_length += + sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); + } + + if (field_avail(typeof(resp), reserved, uhw->outlen)) + resp.response_length += sizeof(resp.reserved); + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); @@ -992,6 +993,86 @@ out: return err; } +static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) +{ + mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", + caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n"); +} + +static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, + struct mlx5_ib_alloc_ucontext_req_v2 *req, + u32 *num_sys_pages) +{ + int uars_per_sys_page; + int bfregs_per_sys_page; + int ref_bfregs = req->total_num_bfregs; + + if (req->total_num_bfregs == 0) + return -EINVAL; + + BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); + BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); + + if (req->total_num_bfregs > MLX5_MAX_BFREGS) + return -ENOMEM; + + uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); + bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; + req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); + *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; + + if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) + return -EINVAL; + + mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n", + MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", + lib_uar_4k ?
"yes" : "no", ref_bfregs, + req->total_num_bfregs, *num_sys_pages); + + return 0; +} + +static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) +{ + struct mlx5_bfreg_info *bfregi; + int err; + int i; + + bfregi = &context->bfregi; + for (i = 0; i < bfregi->num_sys_pages; i++) { + err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); + if (err) + goto error; + + mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]); + } + return 0; + +error: + for (--i; i >= 0; i--) + if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) + mlx5_ib_warn(dev, "failed to free uar %d\n", i); + + return err; +} + +static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) +{ + struct mlx5_bfreg_info *bfregi; + int err; + int i; + + bfregi = &context->bfregi; + for (i = 0; i < bfregi->num_sys_pages; i++) { + err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); + if (err) { + mlx5_ib_warn(dev, "failed to free uar %d\n", i); + return err; + } + } + return 0; +} + static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { @@ -999,17 +1080,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_req_v2 req = {}; struct mlx5_ib_alloc_ucontext_resp resp = {}; struct mlx5_ib_ucontext *context; - struct mlx5_uuar_info *uuari; - struct mlx5_uar *uars; - int gross_uuars; - int num_uars; + struct mlx5_bfreg_info *bfregi; int ver; - int uuarn; int err; - int i; size_t reqlen; size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, max_cqe_version); + bool lib_uar_4k; if (!dev->ib_active) return ERR_PTR(-EAGAIN); @@ -1032,27 +1109,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (req.flags) return ERR_PTR(-EINVAL); - if (req.total_num_uuars > MLX5_MAX_UUARS) - return ERR_PTR(-ENOMEM); - - if (req.total_num_uuars == 0) - return ERR_PTR(-EINVAL); - if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) return ERR_PTR(-EOPNOTSUPP); - if (reqlen > sizeof(req) && - !ib_is_udata_cleared(udata, sizeof(req), - reqlen - sizeof(req))) - return ERR_PTR(-EOPNOTSUPP); - - req.total_num_uuars = ALIGN(req.total_num_uuars, - MLX5_NON_FP_BF_REGS_PER_PAGE); - if (req.num_low_latency_uuars > req.total_num_uuars - 1) + req.total_num_bfregs = ALIGN(req.total_num_bfregs, + MLX5_NON_FP_BFREGS_PER_UAR); + if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return ERR_PTR(-EINVAL); - num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; - gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); @@ -1065,6 +1129,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.cqe_version = min_t(__u8, (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), req.max_cqe_version); + resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? + MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; + resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
+ MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; resp.response_length = min(offsetof(typeof(resp), response_length) + sizeof(resp.response_length), udata->outlen); @@ -1072,58 +1140,58 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (!context) return ERR_PTR(-ENOMEM); - uuari = &context->uuari; - mutex_init(&uuari->lock); - uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); - if (!uars) { - err = -ENOMEM; + lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; + bfregi = &context->bfregi; + + /* updates req->total_num_bfregs */ + err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages); + if (err) goto out_ctx; - } - uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), - sizeof(*uuari->bitmap), + mutex_init(&bfregi->lock); + bfregi->lib_uar_4k = lib_uar_4k; + bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count), GFP_KERNEL); - if (!uuari->bitmap) { + if (!bfregi->count) { err = -ENOMEM; - goto out_uar_ctx; - } - /* - * clear all fast path uuars - */ - for (i = 0; i < gross_uuars; i++) { - uuarn = i & 3; - if (uuarn == 2 || uuarn == 3) - set_bit(i, uuari->bitmap); + goto out_ctx; } - uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); - if (!uuari->count) { + bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, + sizeof(*bfregi->sys_pages), + GFP_KERNEL); + if (!bfregi->sys_pages) { err = -ENOMEM; - goto out_bitmap; + goto out_count; } - for (i = 0; i < num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); - if (err) - goto out_count; - } + err = allocate_uars(dev, context); + if (err) + goto out_sys_pages; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; #endif + context->upd_xlt_page = __get_free_page(GFP_KERNEL); + if (!context->upd_xlt_page) { + err = -ENOMEM; + goto out_uars; + } + mutex_init(&context->upd_xlt_page_mutex); + if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) { err = mlx5_core_alloc_transport_domain(dev->mdev, &context->tdn); if (err) - goto out_uars; + goto out_page; } INIT_LIST_HEAD(&context->vma_private_list); INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); - resp.tot_uuars = req.total_num_uuars; + resp.tot_bfregs = req.total_num_bfregs; resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); if (field_avail(typeof(resp), cqe_version, udata->outlen)) @@ -1135,32 +1203,46 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.response_length += sizeof(resp.cmds_supp_uhw); } + if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) { + if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { + mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline); + resp.eth_min_inline++; + } + resp.response_length += sizeof(resp.eth_min_inline); + } + /* * We don't want to expose information from the PCI bar that is located * after 4096 bytes, so if the arch only supports larger pages, let's * pretend we don't support reading the HCA's core clock. This is also * forced by mmap function. 
*/ - if (PAGE_SIZE <= 4096 && - field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { - resp.comp_mask |= - MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; - resp.hca_core_clock_offset = - offsetof(struct mlx5_init_seg, internal_timer_h) % - PAGE_SIZE; + if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { + if (PAGE_SIZE <= 4096) { + resp.comp_mask |= + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; + resp.hca_core_clock_offset = + offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; + } resp.response_length += sizeof(resp.hca_core_clock_offset) + sizeof(resp.reserved2); } + if (field_avail(typeof(resp), log_uar_size, udata->outlen)) + resp.response_length += sizeof(resp.log_uar_size); + + if (field_avail(typeof(resp), num_uars_per_page, udata->outlen)) + resp.response_length += sizeof(resp.num_uars_per_page); + err = ib_copy_to_udata(udata, &resp, resp.response_length); if (err) goto out_td; - uuari->ver = ver; - uuari->num_low_latency_uuars = req.num_low_latency_uuars; - uuari->uars = uars; - uuari->num_uars = num_uars; + bfregi->ver = ver; + bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; context->cqe_version = resp.cqe_version; + context->lib_caps = req.lib_caps; + print_lib_caps(dev, context->lib_caps); return &context->ibucontext; @@ -1168,20 +1250,21 @@ out_td: if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); +out_page: + free_page(context->upd_xlt_page); + out_uars: - for (i--; i >= 0; i--) - mlx5_cmd_free_uar(dev->mdev, uars[i].index); -out_count: - kfree(uuari->count); + deallocate_uars(dev, context); -out_bitmap: - kfree(uuari->bitmap); +out_sys_pages: + kfree(bfregi->sys_pages); -out_uar_ctx: - kfree(uars); +out_count: + kfree(bfregi->count); out_ctx: kfree(context); + return ERR_PTR(err); } @@ -1189,28 +1272,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); - struct mlx5_uuar_info *uuari = &context->uuari; - int i; + struct mlx5_bfreg_info *bfregi; + bfregi = &context->bfregi; if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); - for (i = 0; i < uuari->num_uars; i++) { - if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) - mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); - } - - kfree(uuari->count); - kfree(uuari->bitmap); - kfree(uuari->uars); + free_page(context->upd_xlt_page); + deallocate_uars(dev, context); + kfree(bfregi->sys_pages); + kfree(bfregi->count); kfree(context); return 0; } -static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) +static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, + int idx) { - return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; + int fw_uars_per_page; + + fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
MLX5_UARS_IN_PAGE : 1; + + return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + + bfregi->sys_pages[idx] / fw_uars_per_page; } static int get_command(unsigned long offset) @@ -1365,11 +1451,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, struct vm_area_struct *vma, struct mlx5_ib_ucontext *context) { - struct mlx5_uuar_info *uuari = &context->uuari; + struct mlx5_bfreg_info *bfregi = &context->bfregi; int err; unsigned long idx; phys_addr_t pfn, pa; pgprot_t prot; + int uars_per_page; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); + idx = get_index(vma->vm_pgoff); + if (idx % uars_per_page || + idx * uars_per_page >= bfregi->num_sys_pages) { + mlx5_ib_warn(dev, "invalid uar index %lu\n", idx); + return -EINVAL; + } switch (cmd) { case MLX5_IB_MMAP_WC_PAGE: @@ -1392,14 +1490,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, return -EINVAL; } - if (vma->vm_end - vma->vm_start != PAGE_SIZE) - return -EINVAL; - - idx = get_index(vma->vm_pgoff); - if (idx >= uuari->num_uars) - return -EINVAL; - - pfn = uar_index2pfn(dev, uuari->uars[idx].index); + pfn = uar_index2pfn(dev, bfregi, idx); mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); vma->vm_page_prot = prot; @@ -1622,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v, if (ib_spec->eth.mask.vlan_tag) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); @@ -3060,8 +3151,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (mlx5_use_mad_ifc(dev)) get_ext_port_caps(dev); - MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); - if (!mlx5_lag_is_active(mdev)) name = "mlx5_%d"; else @@ -3237,9 +3326,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (err) goto err_odp; + dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); + if (!dev->mdev->priv.uar) + goto err_q_cnt; + + err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); + if (err) + goto err_uar_page; + + err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); + if (err) + goto err_bfreg; + err = ib_register_device(&dev->ib_dev, NULL); if (err) - goto err_q_cnt; + goto err_fp_bfreg; err = create_umr_res(dev); if (err) @@ -3262,6 +3363,15 @@ err_umrc: err_dev: ib_unregister_device(&dev->ib_dev); +err_fp_bfreg: + mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); + +err_bfreg: + mlx5_free_bfreg(dev->mdev, &dev->bfreg); + +err_uar_page: + mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); + err_q_cnt: mlx5_ib_dealloc_q_counters(dev); @@ -3293,6 +3403,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) mlx5_remove_netdev_notifier(dev); ib_unregister_device(&dev->ib_dev); + mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); + mlx5_free_bfreg(dev->mdev, &dev->bfreg); + mlx5_put_uars_page(dev->mdev, mdev->priv.uar); mlx5_ib_dealloc_q_counters(dev); destroy_umrc_res(dev); mlx5_ib_odp_remove_one(dev); @@ -3307,6 +3420,9 @@ static struct mlx5_interface mlx5_ib_interface = { .add = mlx5_ib_add, .remove = mlx5_ib_remove, .event = mlx5_ib_event, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + .pfault = mlx5_ib_pfault, +#endif .protocol = MLX5_INTERFACE_PROTOCOL_IB, }; @@ -3317,25 +3433,14 @@ static int __init mlx5_ib_init(void) if (deprecated_prof_sel != 2) pr_warn("prof_sel is deprecated for mlx5_ib, set it for 
mlx5_core\n"); - err = mlx5_ib_odp_init(); - if (err) - return err; - err = mlx5_register_interface(&mlx5_ib_interface); - if (err) - goto clean_odp; - - return err; -clean_odp: - mlx5_ib_odp_cleanup(); return err; } static void __exit mlx5_ib_cleanup(void) { mlx5_unregister_interface(&mlx5_ib_interface); - mlx5_ib_odp_cleanup(); } module_init(mlx5_ib_init); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 6851357c16f4..778d8a18925f 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -159,7 +159,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, unsigned long umem_page_shift = ilog2(umem->page_size); int shift = page_shift - umem_page_shift; int mask = (1 << shift) - 1; - int i, k; + int i, k, idx; u64 cur = 0; u64 base; int len; @@ -185,18 +185,36 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> umem_page_shift; base = sg_dma_address(sg); - for (k = 0; k < len; k++) { + + /* Skip elements below offset */ + if (i + len < offset << shift) { + i += len; + continue; + } + + /* Skip pages below offset */ + if (i < offset << shift) { + k = (offset << shift) - i; + i = offset << shift; + } else { + k = 0; + } + + for (; k < len; k++) { if (!(i & mask)) { cur = base + (k << umem_page_shift); cur |= access_flags; + idx = (i >> shift) - offset; - pas[i >> shift] = cpu_to_be64(cur); + pas[idx] = cpu_to_be64(cur); mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", - i >> shift, be64_to_cpu(pas[i >> shift])); - } else - mlx5_ib_dbg(dev, "=====> 0x%llx\n", - base + (k << umem_page_shift)); + i >> shift, be64_to_cpu(pas[idx])); + } i++; + + /* Stop after num_pages reached */ + if (i >> shift >= offset + num_pages) + return; } } } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 6c6057eb60ea..e1a4b93dce6b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -90,7 +90,6 @@ enum mlx5_ib_latency_class { MLX5_IB_LATENCY_CLASS_LOW, MLX5_IB_LATENCY_CLASS_MEDIUM, MLX5_IB_LATENCY_CLASS_HIGH, - MLX5_IB_LATENCY_CLASS_FAST_PATH }; enum mlx5_ib_mad_ifc_flags { @@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags { }; enum { - MLX5_CROSS_CHANNEL_UUAR = 0, + MLX5_CROSS_CHANNEL_BFREG = 0, }; enum { @@ -120,11 +119,16 @@ struct mlx5_ib_ucontext { /* protect doorbell record alloc/free */ struct mutex db_page_mutex; - struct mlx5_uuar_info uuari; + struct mlx5_bfreg_info bfregi; u8 cqe_version; /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + + unsigned long upd_xlt_page; + /* protect ODP/KSM */ + struct mutex upd_xlt_page_mutex; + u64 lib_caps; }; static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) @@ -174,13 +178,12 @@ struct mlx5_ib_flow_db { * enum ib_send_flags and enum ib_qp_type for low-level driver */ -#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START -#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1) -#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2) - -#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3) -#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4) -#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END +#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0) +#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1) +#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START 
<< 2) +#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3) +#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4) +#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 /* @@ -190,6 +193,16 @@ struct mlx5_ib_flow_db { #define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2 #define MLX5_IB_WR_UMR IB_WR_RESERVED1 +#define MLX5_IB_UMR_OCTOWORD 16 +#define MLX5_IB_UMR_XLT_ALIGNMENT 64 + +#define MLX5_IB_UPD_XLT_ZAP BIT(0) +#define MLX5_IB_UPD_XLT_ENABLE BIT(1) +#define MLX5_IB_UPD_XLT_ATOMIC BIT(2) +#define MLX5_IB_UPD_XLT_ADDR BIT(3) +#define MLX5_IB_UPD_XLT_PD BIT(4) +#define MLX5_IB_UPD_XLT_ACCESS BIT(5) + /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. * * These flags are intended for internal use by the mlx5_ib driver, and they @@ -264,29 +277,6 @@ struct mlx5_ib_rwq_ind_table { u32 rqtn; }; -/* - * Connect-IB can trigger up to four concurrent pagefaults - * per-QP. - */ -enum mlx5_ib_pagefault_context { - MLX5_IB_PAGEFAULT_RESPONDER_READ, - MLX5_IB_PAGEFAULT_REQUESTOR_READ, - MLX5_IB_PAGEFAULT_RESPONDER_WRITE, - MLX5_IB_PAGEFAULT_REQUESTOR_WRITE, - MLX5_IB_PAGEFAULT_CONTEXTS -}; - -static inline enum mlx5_ib_pagefault_context - mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault) -{ - return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE); -} - -struct mlx5_ib_pfault { - struct work_struct work; - struct mlx5_pagefault mpfault; -}; - struct mlx5_ib_ubuffer { struct ib_umem *umem; int buf_size; @@ -334,6 +324,12 @@ struct mlx5_ib_raw_packet_qp { struct mlx5_ib_rq rq; }; +struct mlx5_bf { + int buf_size; + unsigned long offset; + struct mlx5_sq_bfreg *bfreg; +}; + struct mlx5_ib_qp { struct ib_qp ibqp; union { @@ -359,33 +355,19 @@ struct mlx5_ib_qp { int wq_sig; int scat_cqe; int max_inline_data; - struct mlx5_bf *bf; + struct mlx5_bf bf; int has_rq; /* only for user space QPs. For kernel * we have it from the bf object */ - int uuarn; + int bfregn; int create_type; /* Store signature errors */ bool signature_en; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - /* - * A flag that is true for QP's that are in a state that doesn't - * allow page faults, and shouldn't schedule any more faults. - */ - int disable_page_faults; - /* - * The disable_page_faults_lock protects a QP's disable_page_faults - * field, allowing for a thread to atomically check whether the QP - * allows page faults, and if so schedule a page fault. - */ - spinlock_t disable_page_faults_lock; - struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS]; -#endif struct list_head qps_list; struct list_head cq_recv_list; struct list_head cq_send_list; @@ -414,13 +396,11 @@ enum mlx5_ib_qp_flags { struct mlx5_umr_wr { struct ib_send_wr wr; - union { - u64 virt_addr; - u64 offset; - } target; + u64 virt_addr; + u64 offset; struct ib_pd *pd; unsigned int page_shift; - unsigned int npages; + unsigned int xlt_size; u64 length; int access_flags; u32 mkey; @@ -617,7 +597,6 @@ struct mlx5_ib_dev { struct ib_device ib_dev; struct mlx5_core_dev *mdev; struct mlx5_roce roce; - MLX5_DECLARE_DOORBELL_LOCK(uar_lock); int num_ports; /* serialize update of capability mask */ @@ -634,6 +613,7 @@ struct mlx5_ib_dev { int fill_delay; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING struct ib_odp_caps odp_caps; + u64 odp_max_size; /* * Sleepable RCU that prevents destruction of MRs while they are still * being used by a page fault handler. 
@@ -646,6 +626,8 @@ struct mlx5_ib_dev { struct list_head qp_list; /* Array with num_ports elements */ struct mlx5_ib_port *port; + struct mlx5_sq_bfreg bfreg; + struct mlx5_sq_bfreg fp_bfreg; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) @@ -787,8 +769,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata); int mlx5_ib_dealloc_mw(struct ib_mw *mw); -int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, - int npages, int zap); +int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, + int page_shift, int flags); int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_pd *pd, struct ib_udata *udata); @@ -857,18 +839,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -extern struct workqueue_struct *mlx5_ib_page_fault_wq; - void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev); -void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault); -void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp); +void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context, + struct mlx5_pagefault *pfault); int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev); void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev); int __init mlx5_ib_odp_init(void); void mlx5_ib_odp_cleanup(void); -void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp); -void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ @@ -877,13 +854,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) return; } -static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; } static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {} static inline int mlx5_ib_odp_init(void) { return 0; } static inline void mlx5_ib_odp_cleanup(void) {} -static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {} -static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ @@ -1001,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext, return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); } + +static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support) +{ + return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
+ MLX5_UARS_IN_PAGE : 1; +} + +static inline int get_num_uars(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) +{ + return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages; +} + #endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 8f608debe141..8cf2a67f9fb0 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -46,14 +46,9 @@ enum { }; #define MLX5_UMR_ALIGN 2048 -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -static __be64 mlx5_ib_update_mtt_emergency_buffer[ - MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)] - __aligned(MLX5_UMR_ALIGN); -static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex); -#endif static int clean_mr(struct mlx5_ib_mr *mr); +static int use_umr(struct mlx5_ib_dev *dev, int order); static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { @@ -134,6 +129,7 @@ static void reg_mr_callback(int status, void *context) return; } + mr->mmkey.type = MLX5_MKEY_MR; spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); key = dev->mdev->priv.mkey_key++; spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); @@ -629,7 +625,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->dev = dev; if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && - (mlx5_core_is_pf(dev->mdev))) + mlx5_core_is_pf(dev->mdev) && + use_umr(dev, ent->order)) limit = dev->mdev->profile->mr_cache[i].limit; else limit = 0; @@ -732,6 +729,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) goto err_in; kfree(in); + mr->mmkey.type = MLX5_MKEY_MR; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; @@ -757,94 +755,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size) return (npages + 1) / 2; } -static int use_umr(int order) +static int use_umr(struct mlx5_ib_dev *dev, int order) { + if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) + return order < MAX_MR_CACHE_ENTRIES + 2; return order <= MLX5_MAX_UMR_SHIFT; } -static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, - int npages, int page_shift, int *size, - __be64 **mr_pas, dma_addr_t *dma) -{ - __be64 *pas; - struct device *ddev = dev->ib_dev.dma_device; - - /* - * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the pas array, we allocate - * a little more. - */ - *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); - *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); - if (!(*mr_pas)) - return -ENOMEM; - - pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN); - mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); - /* Clear padding after the actual pages. 
*/ - memset(pas + npages, 0, *size - npages * sizeof(u64)); - - *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE); - if (dma_mapping_error(ddev, *dma)) { - kfree(*mr_pas); - return -ENOMEM; - } - - return 0; -} - -static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, - struct ib_sge *sg, u64 dma, int n, u32 key, - int page_shift) -{ - struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - sg->addr = dma; - sg->length = ALIGN(sizeof(u64) * n, 64); - sg->lkey = dev->umrc.pd->local_dma_lkey; - - wr->next = NULL; - wr->sg_list = sg; - if (n) - wr->num_sge = 1; - else - wr->num_sge = 0; - - wr->opcode = MLX5_IB_WR_UMR; - - umrwr->npages = n; - umrwr->page_shift = page_shift; - umrwr->mkey = key; -} - -static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, - struct ib_sge *sg, u64 dma, int n, u32 key, - int page_shift, u64 virt_addr, u64 len, - int access_flags) -{ - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift); - - wr->send_flags = 0; - - umrwr->target.virt_addr = virt_addr; - umrwr->length = len; - umrwr->access_flags = access_flags; - umrwr->pd = pd; -} - -static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, - struct ib_send_wr *wr, u32 key) -{ - struct mlx5_umr_wr *umrwr = umr_wr(wr); - - wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE; - wr->opcode = MLX5_IB_WR_UMR; - umrwr->mkey = key; -} - static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int access_flags, struct ib_umem **umem, int *npages, int *page_shift, int *ncont, @@ -891,21 +808,39 @@ static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) init_completion(&context->done); } +static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, + struct mlx5_umr_wr *umrwr) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr *bad; + int err; + struct mlx5_ib_umr_context umr_context; + + mlx5_ib_init_umr_context(&umr_context); + umrwr->wr.wr_cqe = &umr_context.cqe; + + down(&umrc->sem); + err = ib_post_send(umrc->qp, &umrwr->wr, &bad); + if (err) { + mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); + } else { + wait_for_completion(&umr_context.done); + if (umr_context.status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "reg umr failed (%u)\n", + umr_context.status); + err = -EFAULT; + } + } + up(&umrc->sem); + return err; +} + static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct device *ddev = dev->ib_dev.dma_device; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; - struct mlx5_umr_wr umrwr = {}; - struct ib_send_wr *bad; struct mlx5_ib_mr *mr; - struct ib_sge sg; - int size; - __be64 *mr_pas; - dma_addr_t dma; int err = 0; int i; @@ -924,173 +859,174 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, if (!mr) return ERR_PTR(-EAGAIN); - err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas, - &dma); - if (err) - goto free_mr; - - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; - prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, - page_shift, virt_addr, len, access_flags); - - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); - if (err) { - mlx5_ib_warn(dev, "post send failed, err %d\n", err); - goto unmap_dma; - } else { - 
wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "reg umr failed\n"); - err = -EFAULT; - } - } - + mr->ibmr.pd = pd; + mr->umem = umem; + mr->access_flags = access_flags; + mr->desc_size = sizeof(struct mlx5_mtt); mr->mmkey.iova = virt_addr; mr->mmkey.size = len; mr->mmkey.pd = to_mpd(pd)->pdn; - mr->live = 1; + err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, + MLX5_IB_UPD_XLT_ENABLE); -unmap_dma: - up(&umrc->sem); - dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); - - kfree(mr_pas); - -free_mr: if (err) { free_cached_mr(dev, mr); return ERR_PTR(err); } + mr->live = 1; + return mr; } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, - int zap) +static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, + void *xlt, int page_shift, size_t size, + int flags) { struct mlx5_ib_dev *dev = mr->dev; - struct device *ddev = dev->ib_dev.dma_device; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; struct ib_umem *umem = mr->umem; + + npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); + + if (!(flags & MLX5_IB_UPD_XLT_ZAP)) { + __mlx5_ib_populate_pas(dev, umem, page_shift, + idx, npages, xlt, + MLX5_IB_MTT_PRESENT); + /* Clear padding after the pages + * brought from the umem. + */ + memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, + size - npages * sizeof(struct mlx5_mtt)); + } + + return npages; +} + +#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ + MLX5_UMR_MTT_ALIGNMENT) +#define MLX5_SPARE_UMR_CHUNK 0x10000 + +int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, + int page_shift, int flags) +{ + struct mlx5_ib_dev *dev = mr->dev; + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_ib_ucontext *uctx = NULL; int size; - __be64 *pas; + void *xlt; dma_addr_t dma; - struct ib_send_wr *bad; struct mlx5_umr_wr wr; struct ib_sge sg; int err = 0; - const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64); - const int page_index_mask = page_index_alignment - 1; + int desc_size = sizeof(struct mlx5_mtt); + const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; + const int page_mask = page_align - 1; size_t pages_mapped = 0; size_t pages_to_map = 0; size_t pages_iter = 0; - int use_emergency_buf = 0; + gfp_t gfp; /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, - * so we need to align the offset and length accordingly */ - if (start_page_index & page_index_mask) { - npages += start_page_index & page_index_mask; - start_page_index &= ~page_index_mask; + * so we need to align the offset and length accordingly + */ + if (idx & page_mask) { + npages += idx & page_mask; + idx &= ~page_mask; } - pages_to_map = ALIGN(npages, page_index_alignment); + gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL; + gfp |= __GFP_ZERO | __GFP_NOWARN; - if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES) - return -EINVAL; + pages_to_map = ALIGN(npages, page_align); + size = desc_size * pages_to_map; + size = min_t(int, size, MLX5_MAX_UMR_CHUNK); + + xlt = (void *)__get_free_pages(gfp, get_order(size)); + if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { + mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. 
fallback to spare UMR allocation of %d bytes\n", + size, get_order(size), MLX5_SPARE_UMR_CHUNK); - size = sizeof(u64) * pages_to_map; - size = min_t(int, PAGE_SIZE, size); - /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim - * code, when we are called from an invalidation. The pas buffer must - * be 2k-aligned for Connect-IB. */ - pas = (__be64 *)get_zeroed_page(GFP_ATOMIC); - if (!pas) { - mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n"); - pas = mlx5_ib_update_mtt_emergency_buffer; - size = MLX5_UMR_MTT_MIN_CHUNK_SIZE; - use_emergency_buf = 1; - mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex); - memset(pas, 0, size); + size = MLX5_SPARE_UMR_CHUNK; + xlt = (void *)__get_free_pages(gfp, get_order(size)); } - pages_iter = size / sizeof(u64); - dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE); + + if (!xlt) { + uctx = to_mucontext(mr->ibmr.uobject->context); + mlx5_ib_warn(dev, "Using XLT emergency buffer\n"); + size = PAGE_SIZE; + xlt = (void *)uctx->upd_xlt_page; + mutex_lock(&uctx->upd_xlt_page_mutex); + memset(xlt, 0, size); + } + pages_iter = size / desc_size; + dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) { - mlx5_ib_err(dev, "unable to map DMA during MTT update.\n"); + mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); err = -ENOMEM; - goto free_pas; + goto free_xlt; } + sg.addr = dma; + sg.lkey = dev->umrc.pd->local_dma_lkey; + + memset(&wr, 0, sizeof(wr)); + wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; + if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) + wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; + wr.wr.sg_list = &sg; + wr.wr.num_sge = 1; + wr.wr.opcode = MLX5_IB_WR_UMR; + + wr.pd = mr->ibmr.pd; + wr.mkey = mr->mmkey.key; + wr.length = mr->mmkey.size; + wr.virt_addr = mr->mmkey.iova; + wr.access_flags = mr->access_flags; + wr.page_shift = page_shift; + for (pages_mapped = 0; pages_mapped < pages_to_map && !err; - pages_mapped += pages_iter, start_page_index += pages_iter) { + pages_mapped += pages_iter, idx += pages_iter) { dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE); - - npages = min_t(size_t, - pages_iter, - ib_umem_num_pages(umem) - start_page_index); - - if (!zap) { - __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT, - start_page_index, npages, pas, - MLX5_IB_MTT_PRESENT); - /* Clear padding after the pages brought from the - * umem.
*/ - memset(pas + npages, 0, size - npages * sizeof(u64)); - } + npages = populate_xlt(mr, idx, pages_iter, xlt, + page_shift, size, flags); dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); - mlx5_ib_init_umr_context(&umr_context); - - memset(&wr, 0, sizeof(wr)); - wr.wr.wr_cqe = &umr_context.cqe; - - sg.addr = dma; - sg.length = ALIGN(npages * sizeof(u64), - MLX5_UMR_MTT_ALIGNMENT); - sg.lkey = dev->umrc.pd->local_dma_lkey; + sg.length = ALIGN(npages * desc_size, + MLX5_UMR_MTT_ALIGNMENT); + + if (pages_mapped + pages_iter >= pages_to_map) { + if (flags & MLX5_IB_UPD_XLT_ENABLE) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_ENABLE_MR | + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | + MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; + if (flags & MLX5_IB_UPD_XLT_PD || + flags & MLX5_IB_UPD_XLT_ACCESS) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; + if (flags & MLX5_IB_UPD_XLT_ADDR) + wr.wr.send_flags |= + MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; + } - wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | - MLX5_IB_SEND_UMR_UPDATE_MTT; - wr.wr.sg_list = &sg; - wr.wr.num_sge = 1; - wr.wr.opcode = MLX5_IB_WR_UMR; - wr.npages = sg.length / sizeof(u64); - wr.page_shift = PAGE_SHIFT; - wr.mkey = mr->mmkey.key; - wr.target.offset = start_page_index; + wr.offset = idx * desc_size; + wr.xlt_size = sg.length; - down(&umrc->sem); - err = ib_post_send(umrc->qp, &wr.wr, &bad); - if (err) { - mlx5_ib_err(dev, "UMR post send failed, err %d\n", err); - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_err(dev, "UMR completion failed, code %d\n", - umr_context.status); - err = -EFAULT; - } - } - up(&umrc->sem); + err = mlx5_ib_post_send_wait(dev, &wr); } dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); -free_pas: - if (!use_emergency_buf) - free_page((unsigned long)pas); +free_xlt: + if (uctx) + mutex_unlock(&uctx->upd_xlt_page_mutex); else - mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex); + free_pages((unsigned long)xlt, get_order(size)); return err; } -#endif /* * If ibmr is NULL it will be allocated by reg_create. @@ -1122,8 +1058,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, goto err_1; } pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - mlx5_ib_populate_pas(dev, umem, page_shift, pas, - pg_cap ? MLX5_IB_MTT_PRESENT : 0); + if (!(access_flags & IB_ACCESS_ON_DEMAND)) + mlx5_ib_populate_pas(dev, umem, page_shift, pas, + pg_cap ? MLX5_IB_MTT_PRESENT : 0); /* The pg_access bit allows setting the access flags * in the page list submitted with the command. 
*/ @@ -1153,6 +1090,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, mlx5_ib_warn(dev, "create mkey failed\n"); goto err_2; } + mr->mmkey.type = MLX5_MKEY_MR; mr->umem = umem; mr->dev = dev; mr->live = 1; @@ -1204,14 +1142,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(order)) { + if (use_umr(dev, order)) { mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d", order); mr = NULL; } - } else if (access_flags & IB_ACCESS_ON_DEMAND) { + } else if (access_flags & IB_ACCESS_ON_DEMAND && + !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); goto error; @@ -1248,106 +1187,39 @@ error: static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_core_dev *mdev = dev->mdev; - struct umr_common *umrc = &dev->umrc; - struct mlx5_ib_umr_context umr_context; struct mlx5_umr_wr umrwr = {}; - struct ib_send_wr *bad; - int err; if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) return 0; - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; - prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key); + umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | + MLX5_IB_SEND_UMR_FAIL_IF_FREE; + umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.mkey = mr->mmkey.key; - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); - if (err) { - up(&umrc->sem); - mlx5_ib_dbg(dev, "err %d\n", err); - goto error; - } else { - wait_for_completion(&umr_context.done); - up(&umrc->sem); - } - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "unreg umr failed\n"); - err = -EFAULT; - goto error; - } - return 0; - -error: - return err; + return mlx5_ib_post_send_wait(dev, &umrwr); } -static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, - u64 length, int npages, int page_shift, int order, +static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, int access_flags, int flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct device *ddev = dev->ib_dev.dma_device; - struct mlx5_ib_umr_context umr_context; - struct ib_send_wr *bad; struct mlx5_umr_wr umrwr = {}; - struct ib_sge sg; - struct umr_common *umrc = &dev->umrc; - dma_addr_t dma = 0; - __be64 *mr_pas = NULL; - int size; int err; - mlx5_ib_init_umr_context(&umr_context); - - umrwr.wr.wr_cqe = &umr_context.cqe; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; - if (flags & IB_MR_REREG_TRANS) { - err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size, - &mr_pas, &dma); - if (err) - return err; + umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.mkey = mr->mmkey.key; - umrwr.target.virt_addr = virt_addr; - umrwr.length = length; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; - } - - prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, - page_shift); - - if (flags & IB_MR_REREG_PD) { + if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { umrwr.pd = pd; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD; - } - - if (flags & IB_MR_REREG_ACCESS) { umrwr.access_flags = access_flags; - umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS; + umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; } - /* post send request to UMR QP */ - down(&umrc->sem); - err = ib_post_send(umrc->qp, &umrwr.wr, &bad); - - if (err) { - 
mlx5_ib_warn(dev, "post send failed, err %d\n", err); - } else { - wait_for_completion(&umr_context.done); - if (umr_context.status != IB_WC_SUCCESS) { - mlx5_ib_warn(dev, "reg umr failed (%u)\n", - umr_context.status); - err = -EFAULT; - } - } + err = mlx5_ib_post_send_wait(dev, &umrwr); - up(&umrc->sem); - if (flags & IB_MR_REREG_TRANS) { - dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); - kfree(mr_pas); - } return err; } @@ -1364,6 +1236,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; int page_shift = 0; + int upd_flags = 0; int npages = 0; int ncont = 0; int order = 0; @@ -1372,6 +1245,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); + atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + if (flags != IB_MR_REREG_PD) { /* * Replace umem. This needs to be done whether or not UMR is @@ -1382,7 +1257,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { - mr->umem = NULL; + clean_mr(mr); return err; } } @@ -1414,32 +1289,37 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, /* * Send a UMR WQE */ - err = rereg_umr(pd, mr, addr, len, npages, page_shift, - order, access_flags, flags); + mr->ibmr.pd = pd; + mr->access_flags = access_flags; + mr->mmkey.iova = addr; + mr->mmkey.size = len; + mr->mmkey.pd = to_mpd(pd)->pdn; + + if (flags & IB_MR_REREG_TRANS) { + upd_flags = MLX5_IB_UPD_XLT_ADDR; + if (flags & IB_MR_REREG_PD) + upd_flags |= MLX5_IB_UPD_XLT_PD; + if (flags & IB_MR_REREG_ACCESS) + upd_flags |= MLX5_IB_UPD_XLT_ACCESS; + err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, + upd_flags); + } else { + err = rereg_umr(pd, mr, access_flags, flags); + } + if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); + ib_umem_release(mr->umem); + clean_mr(mr); return err; } } - if (flags & IB_MR_REREG_PD) { - ib_mr->pd = pd; - mr->mmkey.pd = to_mpd(pd)->pdn; - } + set_mr_fileds(dev, mr, npages, len, access_flags); - if (flags & IB_MR_REREG_ACCESS) - mr->access_flags = access_flags; - - if (flags & IB_MR_REREG_TRANS) { - atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); - set_mr_fileds(dev, mr, npages, len, access_flags); - mr->mmkey.iova = addr; - mr->mmkey.size = len; - } #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING update_odp_mr(mr); #endif - return 0; } @@ -1603,11 +1483,11 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); err = mlx5_alloc_priv_descs(pd->device, mr, - ndescs, sizeof(u64)); + ndescs, sizeof(struct mlx5_mtt)); if (err) goto err_free_in; - mr->desc_size = sizeof(u64); + mr->desc_size = sizeof(struct mlx5_mtt); mr->max_descs = ndescs; } else if (mr_type == IB_MR_TYPE_SG_GAPS) { mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; @@ -1656,6 +1536,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, if (err) goto err_destroy_psv; + mr->mmkey.type = MLX5_MKEY_MR; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; @@ -1736,6 +1617,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, if (err) goto free; + mw->mmkey.type = MLX5_MKEY_MW; mw->ibmw.rkey = mw->mmkey.key; resp.response_length 
= min(offsetof(typeof(resp), response_length) + diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index cacb631a7b0a..e5bc267aca73 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -41,13 +41,12 @@ * a pagefault. */ #define MMU_NOTIFIER_TIMEOUT 1000 -struct workqueue_struct *mlx5_ib_page_fault_wq; - void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end) { struct mlx5_ib_mr *mr; - const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1; + const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / + sizeof(struct mlx5_mtt)) - 1; u64 idx = 0, blk_start_idx = 0; int in_block = 0; u64 addr; @@ -90,16 +89,21 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, u64 umr_offset = idx & umr_block_mask; if (in_block && umr_offset == 0) { - mlx5_ib_update_mtt(mr, blk_start_idx, - idx - blk_start_idx, 1); + mlx5_ib_update_xlt(mr, blk_start_idx, + idx - blk_start_idx, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ZAP | + MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; } } } if (in_block) - mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1, - 1); - + mlx5_ib_update_xlt(mr, blk_start_idx, + idx - blk_start_idx + 1, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ZAP | + MLX5_IB_UPD_XLT_ATOMIC); /* * We are now sure that the device will not access the * memory. We can safely unmap it, and mark it as dirty if @@ -120,6 +124,11 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) caps->general_caps = IB_ODP_SUPPORT; + if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) + dev->odp_max_size = U64_MAX; + else + dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT); + if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; @@ -135,6 +144,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; + return; } @@ -143,46 +155,51 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, { u32 base_key = mlx5_base_mkey(key); struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key); - struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + struct mlx5_ib_mr *mr; - if (!mmkey || mmkey->key != key || !mr->live) + if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR) + return NULL; + + mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + + if (!mr->live) return NULL; return container_of(mmkey, struct mlx5_ib_mr, mmkey); } -static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, +static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault, int error) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); - u32 qpn = qp->trans_qp.base.mqp.qpn; + int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ? + pfault->wqe.wq_num : pfault->token; int ret = mlx5_core_page_fault_resume(dev->mdev, - qpn, - pfault->mpfault.flags, + pfault->token, + wq_num, + pfault->type, error); if (ret) - pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn); + mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n", + wq_num); } /* - * Handle a single data segment in a page-fault WQE. + * Handle a single data segment in a page-fault WQE or RDMA region. 
* - * Returns number of pages retrieved on success. The caller will continue to + * Returns number of pages retrieved on success. The caller may continue to * the next data segment. * Can return the following error codes: * -EAGAIN to designate a temporary error. The caller will abort handling the * page fault and resolve it. * -EFAULT when there's an error mapping the requested pages. The caller will - * abort the page fault handling and possibly move the QP to an error state. - * On other errors the QP should also be closed with an error. + * abort the page fault handling. */ -static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, +static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev, u32 key, u64 io_virt, size_t bcnt, + u32 *bytes_committed, u32 *bytes_mapped) { - struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device); int srcu_key; unsigned int current_seq; u64 start_idx; @@ -208,12 +225,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, key); if (bytes_mapped) *bytes_mapped += - (bcnt - pfault->mpfault.bytes_committed); - goto srcu_unlock; - } - if (mr->ibmr.pd != qp->ibqp.pd) { - pr_err("Page-fault with different PDs for QP and MR.\n"); - ret = -EFAULT; + (bcnt - *bytes_committed); goto srcu_unlock; } @@ -229,8 +241,8 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, * in all iterations (in iteration 2 and above, * bytes_committed == 0). */ - io_virt += pfault->mpfault.bytes_committed; - bcnt -= pfault->mpfault.bytes_committed; + io_virt += *bytes_committed; + bcnt -= *bytes_committed; start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT; @@ -251,7 +263,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, * this MR, since ib_umem_odp_map_dma_pages already * checks this. */ - ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); + ret = mlx5_ib_update_xlt(mr, start_idx, npages, + PAGE_SHIFT, + MLX5_IB_UPD_XLT_ATOMIC); } else { ret = -EAGAIN; } @@ -287,7 +301,7 @@ srcu_unlock: } } srcu_read_unlock(&mib_dev->mr_srcu, srcu_key); - pfault->mpfault.bytes_committed = 0; + *bytes_committed = 0; return ret ? ret : npages; } @@ -309,8 +323,9 @@ srcu_unlock: * Returns the number of pages loaded if positive, zero for an empty WQE, or a * negative error code. */ -static int pagefault_data_segments(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault, void *wqe, +static int pagefault_data_segments(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void *wqe, void *wqe_end, u32 *bytes_mapped, u32 *total_wqe_bytes, int receive_queue) { @@ -354,22 +369,23 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp, if (!inline_segment && total_wqe_bytes) { *total_wqe_bytes += bcnt - min_t(size_t, bcnt, - pfault->mpfault.bytes_committed); + pfault->bytes_committed); } /* A zero length data segment designates a length of 2GB. 
*/ if (bcnt == 0) bcnt = 1U << 31; - if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) { - pfault->mpfault.bytes_committed -= + if (inline_segment || bcnt <= pfault->bytes_committed) { + pfault->bytes_committed -= min_t(size_t, bcnt, - pfault->mpfault.bytes_committed); + pfault->bytes_committed); continue; } - ret = pagefault_single_data_segment(qp, pfault, key, io_virt, - bcnt, bytes_mapped); + ret = pagefault_single_data_segment(dev, key, io_virt, bcnt, + &pfault->bytes_committed, + bytes_mapped); if (ret < 0) break; npages += ret; @@ -378,17 +394,29 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp, return ret < 0 ? ret : npages; } +static const u32 mlx5_ib_odp_opcode_cap[] = { + [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND, + [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE, + [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE, + [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ, + [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC, + [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC, +}; + /* * Parse initiator WQE. Advances the wqe pointer to point at the * scatter-gather list, and set wqe_end to the end of the WQE. */ static int mlx5_ib_mr_initiator_pfault_handler( - struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, - void **wqe, void **wqe_end, int wqe_length) + struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); struct mlx5_wqe_ctrl_seg *ctrl = *wqe; - u16 wqe_index = pfault->mpfault.wqe.wqe_index; + u16 wqe_index = pfault->wqe.wqe_index; + u32 transport_caps; + struct mlx5_base_av *av; unsigned ds, opcode; #if defined(DEBUG) u32 ctrl_wqe_index, ctrl_qpn; @@ -434,53 +462,49 @@ static int mlx5_ib_mr_initiator_pfault_handler( opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & MLX5_WQE_CTRL_OPCODE_MASK; + switch (qp->ibqp.qp_type) { case IB_QPT_RC: - switch (opcode) { - case MLX5_OPCODE_SEND: - case MLX5_OPCODE_SEND_IMM: - case MLX5_OPCODE_SEND_INVAL: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_SEND)) - goto invalid_transport_or_opcode; - break; - case MLX5_OPCODE_RDMA_WRITE: - case MLX5_OPCODE_RDMA_WRITE_IMM: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_WRITE)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_raddr_seg); - break; - case MLX5_OPCODE_RDMA_READ: - if (!(dev->odp_caps.per_transport_caps.rc_odp_caps & - IB_ODP_SUPPORT_READ)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_raddr_seg); - break; - default: - goto invalid_transport_or_opcode; - } + transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps; break; case IB_QPT_UD: - switch (opcode) { - case MLX5_OPCODE_SEND: - case MLX5_OPCODE_SEND_IMM: - if (!(dev->odp_caps.per_transport_caps.ud_odp_caps & - IB_ODP_SUPPORT_SEND)) - goto invalid_transport_or_opcode; - *wqe += sizeof(struct mlx5_wqe_datagram_seg); - break; - default: - goto invalid_transport_or_opcode; - } + transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps; break; default: -invalid_transport_or_opcode: - mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. 
transport: 0x%x opcode: 0x%x.\n", - qp->ibqp.qp_type, opcode); + mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n", + qp->ibqp.qp_type); return -EFAULT; } + if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) / + sizeof(mlx5_ib_odp_opcode_cap[0]) || + !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) { + mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n", + opcode); + return -EFAULT; + } + + if (qp->ibqp.qp_type != IB_QPT_RC) { + av = *wqe; + if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) + *wqe += sizeof(struct mlx5_av); + else + *wqe += sizeof(struct mlx5_base_av); + } + + switch (opcode) { + case MLX5_OPCODE_RDMA_WRITE: + case MLX5_OPCODE_RDMA_WRITE_IMM: + case MLX5_OPCODE_RDMA_READ: + *wqe += sizeof(struct mlx5_wqe_raddr_seg); + break; + case MLX5_OPCODE_ATOMIC_CS: + case MLX5_OPCODE_ATOMIC_FA: + *wqe += sizeof(struct mlx5_wqe_raddr_seg); + *wqe += sizeof(struct mlx5_wqe_atomic_seg); + break; + } + return 0; } @@ -489,10 +513,9 @@ invalid_transport_or_opcode: * scatter-gather list, and set wqe_end to the end of the WQE. */ static int mlx5_ib_mr_responder_pfault_handler( - struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, - void **wqe, void **wqe_end, int wqe_length) + struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, + struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); struct mlx5_ib_wq *wq = &qp->rq; int wqe_size = 1 << wq->wqe_shift; @@ -529,70 +552,83 @@ invalid_transport_or_opcode: return 0; } -static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev, + u32 wq_num) +{ + struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num); + + if (!mqp) { + mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num); + return NULL; + } + + return to_mibqp(mqp); +} + +static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault) { - struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); int ret; void *wqe, *wqe_end; u32 bytes_mapped, total_wqe_bytes; char *buffer = NULL; - int resume_with_error = 0; - u16 wqe_index = pfault->mpfault.wqe.wqe_index; - int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR; - u32 qpn = qp->trans_qp.base.mqp.qpn; + int resume_with_error = 1; + u16 wqe_index = pfault->wqe.wqe_index; + int requestor = pfault->type & MLX5_PFAULT_REQUESTOR; + struct mlx5_ib_qp *qp; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) { mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); - resume_with_error = 1; goto resolve_page_fault; } + qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num); + if (!qp) + goto resolve_page_fault; + ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer, PAGE_SIZE, &qp->trans_qp.base); if (ret < 0) { - mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n", - -ret, wqe_index, qpn); - resume_with_error = 1; + mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n", + ret, wqe_index, pfault->token); goto resolve_page_fault; } wqe = buffer; if (requestor) - ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, + ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe, &wqe_end, ret); else - ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, + ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe, &wqe_end, ret); - if (ret < 0) { - 
resume_with_error = 1; + if (ret < 0) goto resolve_page_fault; - } if (wqe >= wqe_end) { mlx5_ib_err(dev, "ODP fault on invalid WQE.\n"); - resume_with_error = 1; goto resolve_page_fault; } - ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, - &total_wqe_bytes, !requestor); + ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end, + &bytes_mapped, &total_wqe_bytes, + !requestor); if (ret == -EAGAIN) { + resume_with_error = 0; goto resolve_page_fault; } else if (ret < 0 || total_wqe_bytes > bytes_mapped) { - mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n", - -ret); - resume_with_error = 1; + if (ret != -ENOENT) + mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n", + ret); goto resolve_page_fault; } + resume_with_error = 0; resolve_page_fault: - mlx5_ib_page_fault_resume(qp, pfault, resume_with_error); - mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n", - qpn, resume_with_error, - pfault->mpfault.flags); - + mlx5_ib_page_fault_resume(dev, pfault, resume_with_error); + mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n", + pfault->token, resume_with_error, + pfault->type); free_page((unsigned long)buffer); } @@ -602,15 +638,14 @@ static int pages_in_range(u64 address, u32 length) (address & PAGE_MASK)) >> PAGE_SHIFT; } -static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault) { - struct mlx5_pagefault *mpfault = &pfault->mpfault; u64 address; u32 length; - u32 prefetch_len = mpfault->bytes_committed; + u32 prefetch_len = pfault->bytes_committed; int prefetch_activated = 0; - u32 rkey = mpfault->rdma.r_key; + u32 rkey = pfault->rdma.r_key; int ret; /* The RDMA responder handler handles the page fault in two parts. @@ -619,38 +654,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, * prefetches more pages. The second operation cannot use the pfault * context and therefore uses the dummy_pfault context allocated on * the stack */ - struct mlx5_ib_pfault dummy_pfault = {}; - - dummy_pfault.mpfault.bytes_committed = 0; + pfault->rdma.rdma_va += pfault->bytes_committed; + pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, + pfault->rdma.rdma_op_len); + pfault->bytes_committed = 0; - mpfault->rdma.rdma_va += mpfault->bytes_committed; - mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed, - mpfault->rdma.rdma_op_len); - mpfault->bytes_committed = 0; - - address = mpfault->rdma.rdma_va; - length = mpfault->rdma.rdma_op_len; + address = pfault->rdma.rdma_va; + length = pfault->rdma.rdma_op_len; /* For some operations, the hardware cannot tell the exact message * length, and in those cases it reports zero. Use prefetch * logic. */ if (length == 0) { prefetch_activated = 1; - length = mpfault->rdma.packet_size; + length = pfault->rdma.packet_size; prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len); } - ret = pagefault_single_data_segment(qp, pfault, rkey, address, length, - NULL); + ret = pagefault_single_data_segment(dev, rkey, address, length, + &pfault->bytes_committed, NULL); if (ret == -EAGAIN) { /* We're racing with an invalidation, don't prefetch */ prefetch_activated = 0; } else if (ret < 0 || pages_in_range(address, length) > ret) { - mlx5_ib_page_fault_resume(qp, pfault, 1); + mlx5_ib_page_fault_resume(dev, pfault, 1); + if (ret != -ENOENT) + mlx5_ib_warn(dev, "PAGE FAULT error %d. 
QP 0x%x, type: 0x%x\n", + ret, pfault->token, pfault->type); return; } - mlx5_ib_page_fault_resume(qp, pfault, 0); + mlx5_ib_page_fault_resume(dev, pfault, 0); + mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n", + pfault->token, pfault->type, + prefetch_activated); /* At this point, there might be a new pagefault already arriving in * the eq, switch to the dummy pagefault for the rest of the @@ -658,112 +695,39 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, * work-queue is being fenced. */ if (prefetch_activated) { - ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey, - address, + u32 bytes_committed = 0; + + ret = pagefault_single_data_segment(dev, rkey, address, prefetch_len, - NULL); + &bytes_committed, NULL); if (ret < 0) { - pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n", - ret, prefetch_activated, - qp->ibqp.qp_num, address, prefetch_len); + mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n", + ret, pfault->token, address, + prefetch_len); } } } -void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, - struct mlx5_ib_pfault *pfault) +void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context, + struct mlx5_pagefault *pfault) { - u8 event_subtype = pfault->mpfault.event_subtype; + struct mlx5_ib_dev *dev = context; + u8 event_subtype = pfault->event_subtype; switch (event_subtype) { case MLX5_PFAULT_SUBTYPE_WQE: - mlx5_ib_mr_wqe_pfault_handler(qp, pfault); + mlx5_ib_mr_wqe_pfault_handler(dev, pfault); break; case MLX5_PFAULT_SUBTYPE_RDMA: - mlx5_ib_mr_rdma_pfault_handler(qp, pfault); + mlx5_ib_mr_rdma_pfault_handler(dev, pfault); break; default: - pr_warn("Invalid page fault event subtype: 0x%x\n", - event_subtype); - mlx5_ib_page_fault_resume(qp, pfault, 1); - break; + mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n", + event_subtype); + mlx5_ib_page_fault_resume(dev, pfault, 1); } } -static void mlx5_ib_qp_pfault_action(struct work_struct *work) -{ - struct mlx5_ib_pfault *pfault = container_of(work, - struct mlx5_ib_pfault, - work); - enum mlx5_ib_pagefault_context context = - mlx5_ib_get_pagefault_context(&pfault->mpfault); - struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp, - pagefaults[context]); - mlx5_ib_mr_pfault_handler(qp, pfault); -} - -void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) -{ - unsigned long flags; - - spin_lock_irqsave(&qp->disable_page_faults_lock, flags); - qp->disable_page_faults = 1; - spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); - - /* - * Note that at this point, we are guarenteed that no more - * work queue elements will be posted to the work queue with - * the QP we are closing. - */ - flush_workqueue(mlx5_ib_page_fault_wq); -} - -void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) -{ - unsigned long flags; - - spin_lock_irqsave(&qp->disable_page_faults_lock, flags); - qp->disable_page_faults = 0; - spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); -} - -static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp, - struct mlx5_pagefault *pfault) -{ - /* - * Note that we will only get one fault event per QP per context - * (responder/initiator, read/write), until we resolve the page fault - * with the mlx5_ib_page_fault_resume command. Since this function is - * called from within the work element, there is no risk of missing - * events. 
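The removed code below is the last piece of the old indirection, in which each fault was copied into a per-QP work item and handled from mlx5_ib_page_fault_wq. For contrast with the direct mlx5_ib_pfault() callback above, the gate-and-drain teardown that the workqueue model required can be sketched as follows; every name here is illustrative, not driver code:

struct faulting_obj {	/* illustrative stand-in for the old QP state */
	spinlock_t lock;
	bool disabled;
};

static void quiesce_faults(struct faulting_obj *obj,
			   struct workqueue_struct *wq)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	obj->disabled = true;	/* no new work may be queued past here */
	spin_unlock_irqrestore(&obj->lock, flags);

	flush_workqueue(wq);	/* drain work that was already queued */
	/* only now is it safe to tear the object down */
}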
- */ - struct mlx5_ib_qp *mibqp = to_mibqp(qp); - enum mlx5_ib_pagefault_context context = - mlx5_ib_get_pagefault_context(pfault); - struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context]; - - qp_pfault->mpfault = *pfault; - - /* No need to stop interrupts here since we are in an interrupt */ - spin_lock(&mibqp->disable_page_faults_lock); - if (!mibqp->disable_page_faults) - queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work); - spin_unlock(&mibqp->disable_page_faults_lock); -} - -void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) -{ - int i; - - qp->disable_page_faults = 1; - spin_lock_init(&qp->disable_page_faults_lock); - - qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler; - - for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i) - INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action); -} - int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { int ret; @@ -780,17 +744,3 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) cleanup_srcu_struct(&ibdev->mr_srcu); } -int __init mlx5_ib_odp_init(void) -{ - mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults", - WQ_MEM_RECLAIM); - if (!mlx5_ib_page_fault_wq) - return -ENOMEM; - - return 0; -} - -void mlx5_ib_odp_cleanup(void) -{ - destroy_workqueue(mlx5_ib_page_fault_wq); -} diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index a1b3125f0a6e..6a83fb32599d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr) return 1; } -static int first_med_uuar(void) +static int first_med_bfreg(void) { return 1; } -static int next_uuar(int n) -{ - n++; - - while (((n % 4) & 2)) - n++; +enum { + /* this is the first blue flame register in the array of bfregs assigned + * to a process. Since we do not use it for blue flame but rather + * regular 64 bit doorbells, we do not need a lock for maintaining + * "odd/even" order + */ + NUM_NON_BLUE_FLAME_BFREGS = 1, +}; - return n; +static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi) +{ + return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR; } -static int num_med_uuar(struct mlx5_uuar_info *uuari) +static int num_med_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int n; - n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - - uuari->num_low_latency_uuars - 1; + n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - + NUM_NON_BLUE_FLAME_BFREGS; return n >= 0 ?
n : 0; } -static int max_uuari(struct mlx5_uuar_info *uuari) -{ - return uuari->num_uars * 4; -} - -static int first_hi_uuar(struct mlx5_uuar_info *uuari) +static int first_hi_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int med; - int i; - int t; - - med = num_med_uuar(uuari); - for (t = 0, i = first_med_uuar();; i = next_uuar(i)) { - t++; - if (t == med) - return next_uuar(i); - } - return 0; + med = num_med_bfreg(dev, bfregi); + return ++med; } -static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) +static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { int i; - for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { - if (!test_bit(i, uuari->bitmap)) { - set_bit(i, uuari->bitmap); - uuari->count[i]++; + for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) { + if (!bfregi->count[i]) { + bfregi->count[i]++; return i; } } @@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) return -ENOMEM; } -static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) +static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi) { - int minidx = first_med_uuar(); + int minidx = first_med_bfreg(); int i; - for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { - if (uuari->count[i] < uuari->count[minidx]) + for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) { + if (bfregi->count[i] < bfregi->count[minidx]) minidx = i; + if (!bfregi->count[minidx]) + break; } - uuari->count[minidx]++; + bfregi->count[minidx]++; return minidx; } -static int alloc_uuar(struct mlx5_uuar_info *uuari, - enum mlx5_ib_latency_class lat) +static int alloc_bfreg(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, + enum mlx5_ib_latency_class lat) { - int uuarn = -EINVAL; + int bfregn = -EINVAL; - mutex_lock(&uuari->lock); + mutex_lock(&bfregi->lock); switch (lat) { case MLX5_IB_LATENCY_CLASS_LOW: - uuarn = 0; - uuari->count[uuarn]++; + BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1); + bfregn = 0; + bfregi->count[bfregn]++; break; case MLX5_IB_LATENCY_CLASS_MEDIUM: - if (uuari->ver < 2) - uuarn = -ENOMEM; + if (bfregi->ver < 2) + bfregn = -ENOMEM; else - uuarn = alloc_med_class_uuar(uuari); + bfregn = alloc_med_class_bfreg(dev, bfregi); break; case MLX5_IB_LATENCY_CLASS_HIGH: - if (uuari->ver < 2) - uuarn = -ENOMEM; + if (bfregi->ver < 2) + bfregn = -ENOMEM; else - uuarn = alloc_high_class_uuar(uuari); - break; - - case MLX5_IB_LATENCY_CLASS_FAST_PATH: - uuarn = 2; + bfregn = alloc_high_class_bfreg(dev, bfregi); break; } - mutex_unlock(&uuari->lock); - - return uuarn; -} + mutex_unlock(&bfregi->lock); -static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) -{ - clear_bit(uuarn, uuari->bitmap); - --uuari->count[uuarn]; -} - -static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) -{ - clear_bit(uuarn, uuari->bitmap); - --uuari->count[uuarn]; + return bfregn; } -static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) +static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) { - int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; - int high_uuar = nuuars - uuari->num_low_latency_uuars; - - mutex_lock(&uuari->lock); - if (uuarn == 0) { - --uuari->count[uuarn]; - goto out; - } - - if (uuarn < high_uuar) { - free_med_class_uuar(uuari, uuarn); - goto out; - } - - free_high_class_uuar(uuari, uuarn); - -out: - mutex_unlock(&uuari->lock); + 
mutex_lock(&bfregi->lock); + bfregi->count[bfregn]--; + mutex_unlock(&bfregi->lock); } static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) @@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq); -static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) +static int bfregn_to_uar_index(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, int bfregn) { - return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; + int bfregs_per_sys_page; + int index_of_sys_page; + int offset; + + bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * + MLX5_NON_FP_BFREGS_PER_UAR; + index_of_sys_page = bfregn / bfregs_per_sys_page; + + offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR; + + return bfregi->sys_pages[index_of_sys_page] + offset; } static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, @@ -762,6 +740,13 @@ err_umem: return err; } +static int adjust_bfregn(struct mlx5_ib_dev *dev, + struct mlx5_bfreg_info *bfregi, int bfregn) +{ + return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR + + bfregn % MLX5_NON_FP_BFREGS_PER_UAR; +} + static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct ib_qp_init_attr *attr, @@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, int uar_index; int npages; u32 offset = 0; - int uuarn; + int bfregn; int ncont = 0; __be64 *pas; void *qpc; @@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, */ if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) /* In CROSS_CHANNEL CQ and QP must use the same UAR */ - uuarn = MLX5_CROSS_CHANNEL_UUAR; + bfregn = MLX5_CROSS_CHANNEL_BFREG; else { - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); - if (uuarn < 0) { - mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH); + if (bfregn < 0) { + mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to medium latency\n"); - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); - if (uuarn < 0) { - mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM); + if (bfregn < 0) { + mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n"); mlx5_ib_dbg(dev, "reverting to high latency\n"); - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); - if (uuarn < 0) { - mlx5_ib_warn(dev, "uuar allocation failed\n"); - return uuarn; + bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW); + if (bfregn < 0) { + mlx5_ib_warn(dev, "bfreg allocation failed\n"); + return bfregn; } } } } - uar_index = uuarn_to_uar_index(&context->uuari, uuarn); - mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); + uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn); + mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); qp->rq.offset = 0; qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); @@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = set_user_buf_size(dev, qp, &ucmd, base, attr); if (err) - goto err_uuar; + goto err_bfreg; if (ucmd.buf_addr && ubuffer->buf_size) { ubuffer->buf_addr = ucmd.buf_addr; @@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 
&ubuffer->umem, &npages, &page_shift, &ncont, &offset); if (err) - goto err_uuar; + goto err_bfreg; } else { ubuffer->umem = NULL; } @@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, MLX5_SET(qpc, qpc, page_offset, offset); MLX5_SET(qpc, qpc, uar_page, uar_index); - resp->uuar_index = uuarn; - qp->uuarn = uuarn; + resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn); + qp->bfregn = bfregn; err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); if (err) { @@ -882,13 +867,13 @@ err_umem: if (ubuffer->umem) ib_umem_release(ubuffer->umem); -err_uuar: - free_uuar(&context->uuari, uuarn); +err_bfreg: + free_bfreg(dev, &context->bfregi, bfregn); return err; } -static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, - struct mlx5_ib_qp_base *base) +static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base) { struct mlx5_ib_ucontext *context; @@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, mlx5_ib_db_unmap_user(context, &qp->db); if (base->ubuffer.umem) ib_umem_release(base->ubuffer.umem); - free_uuar(&context->uuari, qp->uuarn); + free_bfreg(dev, &context->bfregi, qp->bfregn); } static int create_kernel_qp(struct mlx5_ib_dev *dev, @@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, u32 **in, int *inlen, struct mlx5_ib_qp_base *base) { - enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; - struct mlx5_uuar_info *uuari; int uar_index; void *qpc; - int uuarn; int err; - uuari = &dev->mdev->priv.uuari; if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_IPOIB_UD_LSO | @@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) - lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; - - uuarn = alloc_uuar(uuari, lc); - if (uuarn < 0) { - mlx5_ib_dbg(dev, "\n"); - return -ENOMEM; - } + qp->bf.bfreg = &dev->fp_bfreg; + else + qp->bf.bfreg = &dev->bfreg; - qp->bf = &uuari->bfs[uuarn]; - uar_index = qp->bf->uar->index; + qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); + uar_index = qp->bf.bfreg->index; err = calc_sq_size(dev, init_attr, qp); if (err < 0) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_uuar; + return err; } qp->rq.offset = 0; @@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); - goto err_uuar; + return err; } qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); @@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, return 0; err_wrid: - mlx5_db_free(dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); + mlx5_db_free(dev->mdev, &qp->db); err_free: kvfree(*in); err_buf: mlx5_buf_free(dev->mdev, &qp->buf); - -err_uuar: - free_uuar(&dev->mdev->priv.uuari, uuarn); return err; } static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { - mlx5_db_free(dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); + mlx5_db_free(dev->mdev, &qp->db); mlx5_buf_free(dev->mdev, &qp->buf); - free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); } static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) @@ -1353,7 +1326,7 @@ static int 
create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (init_attr->create_flags || init_attr->send_cq) return -EINVAL; - min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index); + min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index); if (udata->outlen < min_resp_len) return -EINVAL; @@ -1526,9 +1499,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, &qp->raw_packet_qp.rq.base : &qp->trans_qp.base; - if (init_attr->qp_type != IB_QPT_RAW_PACKET) - mlx5_ib_odp_create_qp(qp); - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -1795,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err_create: if (qp->create_type == MLX5_QP_USER) - destroy_qp_user(pd, qp, base); + destroy_qp_user(dev, pd, qp, base); else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); @@ -1923,7 +1893,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) if (qp->state != IB_QPS_RESET) { if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) { - mlx5_ib_qp_disable_pagefaults(qp); err = mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp); @@ -1974,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); else if (qp->create_type == MLX5_QP_USER) - destroy_qp_user(&get_pd(qp)->ibpd, qp, base); + destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base); } static const char *ib_qp_type_str(enum ib_qp_type type) @@ -2823,16 +2792,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (mlx5_st < 0) goto out; - /* If moving to a reset or error state, we must disable page faults on - * this QP and flush all current page faults. Otherwise a stale page - * fault may attempt to work on this QP after it is reset and moved - * again to RTS, and may cause the driver and the device to get out of - * sync. 
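With faults now delivered against the device instead of a QP pointer, the stale-fault race described in the deleted comment below is resolved by lookup rather than by quiescing: every fault carries a token, and a WQ number that no longer maps to a live QP simply gets an error resume. A sketch of that resolve-or-fail step, reusing mlx5_ib_odp_find_qp() and mlx5_ib_page_fault_resume() from the odp.c hunks above:

static void handle_wq_fault(struct mlx5_ib_dev *dev,
			    struct mlx5_pagefault *pfault)
{
	struct mlx5_ib_qp *qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);

	if (!qp) {
		/* QP was reset or destroyed: complete the fault with an
		 * error resume so nothing touches a recycled QP */
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		return;
	}
	/* ...read the WQE and fault in its data segments as above... */
}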
*/ - if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && - (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) && - (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) - mlx5_ib_qp_disable_pagefaults(qp); - if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || !optab[mlx5_cur][mlx5_new]) goto out; @@ -2864,10 +2823,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (err) goto out; - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT && - (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) - mlx5_ib_qp_enable_pagefaults(qp); - qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) @@ -3080,9 +3035,10 @@ static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) dseg->addr = cpu_to_be64(sg->addr); } -static __be16 get_klm_octo(int npages) +static u64 get_xlt_octo(u64 bytes) { - return cpu_to_be16(ALIGN(npages, 8) / 2); + return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) / + MLX5_IB_UMR_OCTOWORD; } static __be64 frwr_mkey_mask(void) @@ -3127,18 +3083,14 @@ static __be64 sig_mkey_mask(void) } static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, - struct mlx5_ib_mr *mr) + struct mlx5_ib_mr *mr) { - int ndescs = mr->ndescs; + int size = mr->ndescs * mr->desc_size; memset(umr, 0, sizeof(*umr)); - if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) - /* KLMs take twice the size of MTTs */ - ndescs *= 2; - umr->flags = MLX5_UMR_CHECK_NOT_FREE; - umr->klm_octowords = get_klm_octo(ndescs); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); umr->mkey_mask = frwr_mkey_mask(); } @@ -3149,37 +3101,17 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) umr->flags = MLX5_UMR_INLINE; } -static __be64 get_umr_reg_mr_mask(int atomic) +static __be64 get_umr_enable_mr_mask(void) { u64 result; - result = MLX5_MKEY_MASK_LEN | - MLX5_MKEY_MASK_PAGE_SIZE | - MLX5_MKEY_MASK_START_ADDR | - MLX5_MKEY_MASK_PD | - MLX5_MKEY_MASK_LR | - MLX5_MKEY_MASK_LW | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_RR | - MLX5_MKEY_MASK_RW | + result = MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_FREE; - if (atomic) - result |= MLX5_MKEY_MASK_A; - return cpu_to_be64(result); } -static __be64 get_umr_unreg_mr_mask(void) -{ - u64 result; - - result = MLX5_MKEY_MASK_FREE; - - return cpu_to_be64(result); -} - -static __be64 get_umr_update_mtt_mask(void) +static __be64 get_umr_disable_mr_mask(void) { u64 result; @@ -3194,23 +3126,22 @@ static __be64 get_umr_update_translation_mask(void) result = MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_PAGE_SIZE | - MLX5_MKEY_MASK_START_ADDR | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + MLX5_MKEY_MASK_START_ADDR; return cpu_to_be64(result); } -static __be64 get_umr_update_access_mask(void) +static __be64 get_umr_update_access_mask(int atomic) { u64 result; - result = MLX5_MKEY_MASK_LW | + result = MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_RR | - MLX5_MKEY_MASK_RW | - MLX5_MKEY_MASK_A | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + MLX5_MKEY_MASK_RW; + + if (atomic) + result |= MLX5_MKEY_MASK_A; return cpu_to_be64(result); } @@ -3219,9 +3150,7 @@ static __be64 get_umr_update_pd_mask(void) { u64 result; - result = MLX5_MKEY_MASK_PD | - MLX5_MKEY_MASK_KEY | - MLX5_MKEY_MASK_FREE; + result = MLX5_MKEY_MASK_PD; return cpu_to_be64(result); } @@ -3238,24 +3167,24 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, else umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ - if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { - umr->klm_octowords = get_klm_octo(umrwr->npages); - if (wr->send_flags & 
MLX5_IB_SEND_UMR_UPDATE_MTT) { - umr->mkey_mask = get_umr_update_mtt_mask(); - umr->bsf_octowords = get_klm_octo(umrwr->target.offset); - umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; - } - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) - umr->mkey_mask |= get_umr_update_translation_mask(); - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS) - umr->mkey_mask |= get_umr_update_access_mask(); - if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) - umr->mkey_mask |= get_umr_update_pd_mask(); - if (!umr->mkey_mask) - umr->mkey_mask = get_umr_reg_mr_mask(atomic); - } else { - umr->mkey_mask = get_umr_unreg_mr_mask(); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { + u64 offset = get_xlt_octo(umrwr->offset); + + umr->xlt_offset = cpu_to_be16(offset & 0xffff); + umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); + umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; + } + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) + umr->mkey_mask |= get_umr_update_translation_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { + umr->mkey_mask |= get_umr_update_access_mask(atomic); + umr->mkey_mask |= get_umr_update_pd_mask(); } + if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) + umr->mkey_mask |= get_umr_enable_mr_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) + umr->mkey_mask |= get_umr_disable_mr_mask(); if (!wr->num_sge) umr->flags |= MLX5_UMR_INLINE; @@ -3303,17 +3232,17 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w struct mlx5_umr_wr *umrwr = umr_wr(wr); memset(seg, 0, sizeof(*seg)); - if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { + if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) seg->status = MLX5_MKEY_STATUS_FREE; - return; - } seg->flags = convert_access(umrwr->access_flags); - if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { - if (umrwr->pd) - seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); - seg->start_addr = cpu_to_be64(umrwr->target.virt_addr); - } + if (umrwr->pd) + seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && + !umrwr->length) + seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); + + seg->start_addr = cpu_to_be64(umrwr->virt_addr); seg->len = cpu_to_be64(umrwr->length); seg->log2_page_size = umrwr->page_shift; seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | @@ -3611,7 +3540,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr, } static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, - struct ib_sig_handover_wr *wr, u32 nelements, + struct ib_sig_handover_wr *wr, u32 size, u32 length, u32 pdn) { struct ib_mr *sig_mr = wr->sig_mr; @@ -3626,17 +3555,17 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | MLX5_MKEY_BSF_EN | pdn); seg->len = cpu_to_be64(length); - seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); + seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); } static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, - u32 nelements) + u32 size) { memset(umr, 0, sizeof(*umr)); umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; - umr->klm_octowords = get_klm_octo(nelements); + umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); umr->mkey_mask = sig_mkey_mask(); } @@ -3648,7 +3577,7 @@ static int 
set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); u32 pdn = get_pd(qp)->pdn; - u32 klm_oct_size; + u32 xlt_size; int region_len, ret; if (unlikely(wr->wr.num_sge != 1) || @@ -3670,15 +3599,15 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, * then we use strided block format (3 octowords), * else we use single KLM (1 octoword) **/ - klm_oct_size = wr->prot ? 3 : 1; + xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm); - set_sig_umr_segment(*seg, klm_oct_size); + set_sig_umr_segment(*seg, xlt_size); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); - set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); + set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn); *seg += sizeof(struct mlx5_mkey_seg); *size += sizeof(struct mlx5_mkey_seg) / 16; if (unlikely((*seg == qp->sq.qend))) @@ -3784,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) } } -static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, - unsigned bytecnt, struct mlx5_ib_qp *qp) -{ - while (bytecnt > 0) { - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - __iowrite64_copy(dst++, src++, 8); - bytecnt -= 64; - if (unlikely(src == qp->sq.qend)) - src = mlx5_get_send_wqe(qp, 0); - } -} - static u8 get_fence(u8 fence, struct ib_send_wr *wr) { if (unlikely(wr->opcode == IB_WR_LOCAL_INV && @@ -3897,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); qp = to_mqp(ibqp); - bf = qp->bf; + bf = &qp->bf; qend = qp->sq.qend; spin_lock_irqsave(&qp->sq.lock, flags); @@ -4170,28 +4081,13 @@ out: * we hit doorbell */ wmb(); - if (bf->need_lock) - spin_lock(&bf->lock); - else - __acquire(&bf->lock); - - /* TBD enable WC */ - if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { - mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); - /* wc_wmb(); */ - } else { - mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, - MLX5_GET_DOORBELL_LOCK(&bf->lock32)); - /* Make sure doorbells don't leak out of SQ spinlock - * and reach the HCA out of order. - */ - mmiowb(); - } + /* currently we support only regular doorbells */ + mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL); + /* Make sure doorbells don't leak out of SQ spinlock + * and reach the HCA out of order. + */ + mmiowb(); bf->offset ^= bf->buf_size; - if (bf->need_lock) - spin_unlock(&bf->lock); - else - __release(&bf->lock); } spin_unlock_irqrestore(&qp->sq.lock, flags); @@ -4559,14 +4455,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - /* - * Wait for any outstanding page faults, in case the user frees memory - * based upon this query's result. 
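The post_send hunk above replaces the conditional blue-flame copy and its lock juggling with one unconditional 64-bit doorbell write; bf->offset still alternates between the two doorbell buffers. The resulting ring sequence, pulled out as a sketch (the doorbell-record update shown first is what precedes the write in post_send):

static void ring_sq_doorbell(struct mlx5_ib_qp *qp,
			     struct mlx5_wqe_ctrl_seg *ctrl)
{
	struct mlx5_bf *bf = &qp->bf;

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
	wmb();		/* doorbell record visible before the MMIO write */
	mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
	mmiowb();	/* keep doorbells ordered within the SQ spinlock */
	bf->offset ^= bf->buf_size;	/* alternate doorbell buffers */
}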
- */ - flush_workqueue(mlx5_ib_page_fault_wq); -#endif - mutex_lock(&qp->mutex); if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 296f1411fe84..3b11422b1cce 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *); static void listen_check(DIVA_CAPI_ADAPTER *); static byte AddInfo(byte **, byte **, byte *, byte *); static byte getChannel(API_PARSE *); -static void IndParse(PLCI *, word *, byte **, byte); +static void IndParse(PLCI *, const word *, byte **, byte); static byte ie_compare(byte *, byte *); static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *); static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word); @@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci) /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */ /* SMSG is situated at the end because its 0 (for compatibility reasons */ /* (see Info_Mask Bit 4, first IE. then the message type) */ - word parms_id[] = + static const word parms_id[] = {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA, UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW, RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR, @@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci) /* 14 FTY repl by ESC_CHI */ /* 18 PI repl by ESC_LAW */ /* removed OAD changed to 0xff for future use, OAD is multiIE now */ - word multi_fac_id[] = {1, FTY}; - word multi_pi_id[] = {1, PI}; - word multi_CiPN_id[] = {1, OAD}; - word multi_ssext_id[] = {1, ESC_SSEXT}; + static const word multi_fac_id[] = {1, FTY}; + static const word multi_pi_id[] = {1, PI}; + static const word multi_CiPN_id[] = {1, OAD}; + static const word multi_ssext_id[] = {1, ESC_SSEXT}; - word multi_vswitch_id[] = {1, ESC_VSWITCH}; + static const word multi_vswitch_id[] = {1, ESC_VSWITCH}; byte *cau; word ncci; @@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a) /* functions for all parameters sent in INDs */ /*------------------------------------------------------------------*/ -static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize) +static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize) { word ploc; /* points to current location within packet */ byte w; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8029dd4912b6..e6af04716cf7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -211,8 +211,8 @@ static int lacp_fast; static int bond_init(struct net_device *bond_dev); static void bond_uninit(struct net_device *bond_dev); -static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, - struct rtnl_link_stats64 *stats); +static void bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats); static void bond_slave_arr_handler(struct work_struct *work); static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, int mod); @@ -1993,11 +1993,10 @@ static int bond_release_and_destroy(struct net_device *bond_dev, return ret; } -static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) +static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) { struct bonding *bond = netdev_priv(bond_dev); bond_fill_ifbond(bond, info); - return 0; } static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) @@ -3337,8 +3336,8 @@ static 
void bond_fold_stats(struct rtnl_link_stats64 *_res, } } -static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, - struct rtnl_link_stats64 *stats) +static void bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats) { struct bonding *bond = netdev_priv(bond_dev); struct rtnl_link_stats64 temp; @@ -3362,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, memcpy(&bond->bond_stats, stats, sizeof(*stats)); spin_unlock(&bond->stats_lock); - - return stats; } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) @@ -3411,12 +3408,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond))) return -EFAULT; - res = bond_info_query(bond_dev, &k_binfo); - if (res == 0 && - copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) + bond_info_query(bond_dev, &k_binfo); + if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond))) return -EFAULT; - return res; + return 0; case BOND_SLAVE_INFO_QUERY_OLD: case SIOCBONDSLAVEINFOQUERY: u_sinfo = (struct ifslave __user *)ifr->ifr_data; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 8f5e93cb7975..0e0df0ba288c 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota) u32 reg_ier = AT91_IRQ_ERR_FRAME; reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); - napi_complete(napi); + napi_complete_done(napi, work_done); at91_write(priv, AT91_IER, reg_ier); } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3dccd3200d5..606b7d8ffe13 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) end: if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable all IRQs if we are not in bus off state */ if (priv->can.state != CAN_STATE_BUS_OFF) c_can_irq_control(priv, true); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 8d6208c0b400..611d16a7061d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, return 0; } +/* Checks the validity of predefined bitrate settings */ +static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) +{ + struct can_priv *priv = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < bitrate_const_cnt; i++) { + if (bt->bitrate == bitrate_const[i]) + break; + } + + if (i >= priv->bitrate_const_cnt) + return -EINVAL; + + return 0; +} + static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, - const struct can_bittiming_const *btc) + const struct can_bittiming_const *btc, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) { int err; - /* Check if the CAN device has bit-timing parameters */ - if (!btc) - return -EOPNOTSUPP; - /* * Depending on the given can_bittiming parameter structure the CAN * timing parameters are calculated based on the provided bitrate OR * alternatively the CAN timing parameters (tq, prop_seg, etc.) are * provided directly which are then checked and fixed up. 
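The reworked can_get_bittiming() above gives hardware with a fixed set of supported bitrates a third path besides calculation and fixup: validation against a table (see the dispatch right after this comment). A sketch of how such a driver would populate the new fields at probe time; the values and my_* names are made up, and my_set_bittiming stands for the driver's int (*do_set_bittiming)(struct net_device *) hook:

static const u32 my_bitrate_const[] = {
	125000, 250000, 500000, 1000000,
};

static void my_candev_setup(struct net_device *ndev)
{
	struct can_priv *priv = netdev_priv(ndev);

	/* no bittiming_const: requested bitrates are validated against
	 * the table and then passed on via do_set_bittiming() */
	priv->bitrate_const = my_bitrate_const;
	priv->bitrate_const_cnt = ARRAY_SIZE(my_bitrate_const);
	priv->do_set_bittiming = my_set_bittiming;	/* hypothetical */
}

register_candev() below additionally enforces that bitrate_const and bitrate_const_cnt are set (or left unset) together.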
*/ - if (!bt->tq && bt->bitrate) + if (!bt->tq && bt->bitrate && btc) err = can_calc_bittiming(dev, bt, btc); - else if (bt->tq && !bt->bitrate) + else if (bt->tq && !bt->bitrate && btc) err = can_fixup_bittiming(dev, bt, btc); + else if (!bt->tq && bt->bitrate && bitrate_const) + err = can_validate_bitrate(dev, bt, bitrate_const, + bitrate_const_cnt); else err = -EINVAL; @@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bittiming(). Bail out if neither + * is given. + */ + if (!priv->bittiming_const && !priv->do_set_bittiming) + return -EOPNOTSUPP; + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, priv->bittiming_const); + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt); if (err) return err; memcpy(&priv->bittiming, &bt, sizeof(bt)); @@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev, /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; + + /* Calculate bittiming parameters based on + * data_bittiming_const if set, otherwise pass bitrate + * directly via do_set_data_bittiming(). Bail out if neither + * is given. + */ + if (!priv->data_bittiming_const && !priv->do_set_data_bittiming) + return -EOPNOTSUPP; + memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); - err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const); + err = can_get_bittiming(dev, &dbt, + priv->data_bittiming_const, + priv->data_bitrate_const, + priv->data_bitrate_const_cnt); if (err) return err; memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); @@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev, } } + if (data[IFLA_CAN_TERMINATION]) { + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); + const unsigned int num_term = priv->termination_const_cnt; + unsigned int i; + + if (!priv->do_set_termination) + return -EOPNOTSUPP; + + /* check whether given value is supported by the interface */ + for (i = 0; i < num_term; i++) { + if (termval == priv->termination_const[i]) + break; + } + if (i >= num_term) + return -EINVAL; + + /* Finally, set the termination value */ + err = priv->do_set_termination(dev, termval); + if (err) + return err; + + priv->termination = termval; + } + return 0; } @@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(struct can_bittiming)); if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); + if (priv->termination_const) { + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ + priv->termination_const_cnt); + } + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt); + if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt); return size; } @@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) (priv->data_bittiming_const && nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const))) + priv->data_bittiming_const)) || + + (priv->termination_const && + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || + nla_put(skb, IFLA_CAN_TERMINATION_CONST, + sizeof(*priv->termination_const) * + priv->termination_const_cnt, + priv->termination_const))) || + + (priv->bitrate_const && + nla_put(skb, IFLA_CAN_BITRATE_CONST, + sizeof(*priv->bitrate_const) * + priv->bitrate_const_cnt, + priv->bitrate_const)) || + + (priv->data_bitrate_const && + nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, + sizeof(*priv->data_bitrate_const) * + priv->data_bitrate_const_cnt, + priv->data_bitrate_const)) + ) + return -EMSGSIZE; return 0; @@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { */ int register_candev(struct net_device *dev) { + struct can_priv *priv = netdev_priv(dev); + + /* Ensure termination_const, termination_const_cnt and + * do_set_termination consistency. All must be either set or + * unset. + */ + if ((!priv->termination_const != !priv->termination_const_cnt) || + (!priv->termination_const != !priv->do_set_termination)) + return -EINVAL; + + if (!priv->bitrate_const != !priv->bitrate_const_cnt) + return -EINVAL; + + if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) + return -EINVAL; + dev->rtnl_link_ops = &can_link_ops; return register_netdev(dev); } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 16f7cadda5c3..43cfce8b076b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_struct *napi, int quota) work_done += flexcan_poll_bus_err(dev, reg_esr); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* enable IRQs */ flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); flexcan_write(priv->reg_ctrl_default, ®s->ctrl); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 368bb0710d8f..138f5ae75c0b 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ifi_canfd_irq_enable(ndev, 1); } diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f13bb8d9bb84..2ba1a81500c1 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) - napi_complete(napi); + napi_complete_done(napi, received); spin_lock_irqsave(&mod->lock, flags); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 195f15edb32e..7a6554efd42b 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota) work_done += m_can_do_rx_poll(dev, (quota - work_done)); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); m_can_enable_all_interrupts(priv); } diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 788459f6bf5c..caed4e6960f8 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -695,7 +695,7 @@ static int 
rcar_can_rx_poll(struct napi_struct *napi, int quota) } /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); priv->ier |= RCAR_CAN_IER_RXFIE; writeb(priv->ier, &priv->regs->ier); } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 43cdd5544b0c..4ef07d97156d 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* All packets processed */ if (num_pkts < quota) { - napi_complete(napi); + napi_complete_done(napi, num_pkts); /* Enable Rx FIFO interrupts */ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFIE); diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index cdc0c7433a4b..4d4492884e0b 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -310,7 +310,7 @@ pcmcia_bad: pcmcia_failed: pcmcia_disable_device(pcmcia); pcmcia->priv = NULL; - return ret ?: -ENODEV; + return ret; } static const struct pcmcia_device_id softingcs_ids[] = { diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index c71a03593595..89aec07c225f 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) can_led_event(ndev, CAN_LED_EVENT_RX); if (work_done < quota) { - napi_complete(napi); + napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); priv->write_reg(priv, XCAN_IER_OFFSET, ier); diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 8346e4f9737a..a3c941632217 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o +obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o +bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-y += b53/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 947adda3397d..8cf4801994e8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -712,7 +712,7 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_SIZE; } -static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); @@ -723,9 +723,9 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) memcpy(data + i * ETH_GSTRING_LEN, mibs[i].name, ETH_GSTRING_LEN); } +EXPORT_SYMBOL(b53_get_strings); -static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data) +void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); @@ -756,13 +756,15 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, mutex_unlock(&dev->stats_mutex); } +EXPORT_SYMBOL(b53_get_ethtool_stats); -static int b53_get_sset_count(struct dsa_switch *ds) +int b53_get_sset_count(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; return b53_get_mib_size(dev); } +EXPORT_SYMBOL(b53_get_sset_count); static int b53_setup(struct dsa_switch *ds) { @@ -921,15 +923,15 @@ static void 
b53_adjust_link(struct dsa_switch *ds, int port, } } -static int b53_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering) +int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { return 0; } +EXPORT_SYMBOL(b53_vlan_filtering); -static int b53_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct b53_device *dev = ds->priv; @@ -943,10 +945,11 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_vlan_prepare); -static void b53_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -977,9 +980,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } } +EXPORT_SYMBOL(b53_vlan_add); -static int b53_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -1015,10 +1019,11 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_vlan_del); -static int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) +int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) { struct b53_device *dev = ds->priv; u16 vid, vid_start = 0, pvid; @@ -1057,6 +1062,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port, return err; } +EXPORT_SYMBOL(b53_vlan_dump); /* Address Resolution Logic routines */ static int b53_arl_op_wait(struct b53_device *dev) @@ -1137,7 +1143,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, int ret; /* Convert the array into a 64-bit MAC */ - mac = b53_mac_to_u64(addr); + mac = ether_addr_to_u64(addr); /* Perform a read for the given MAC and VID */ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); @@ -1175,9 +1181,9 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return b53_arl_rw_op(dev, 0); } -static int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct b53_device *priv = ds->priv; @@ -1189,24 +1195,27 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_fdb_prepare); -static void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) +void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) { struct b53_device *priv = ds->priv; if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) pr_err("%s: failed to add MAC address\n", __func__); } +EXPORT_SYMBOL(b53_fdb_add); -static int b53_fdb_del(struct dsa_switch *ds, int port, - const 
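/* ---- Editor's note ----------------------------------------------------
 * ether_addr_to_u64() from <linux/etherdevice.h> packs a 6-byte MAC into
 * the low 48 bits of a u64, most significant byte first -- exactly what
 * the removed b53_mac_to_u64() open-coded. Equivalent sketch for
 * illustration (mac_to_u64() is a hypothetical local name):
 */
#include <linux/types.h>
#include <linux/if_ether.h>

static u64 mac_to_u64(const u8 *mac)
{
	u64 v = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		v = (v << 8) | mac[i];	/* mac[0] ends up in bits 47:40 */

	return v;
}
/* ----------------------------------------------------------------------- */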
struct switchdev_obj_port_fdb *fdb) +int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) { struct b53_device *priv = ds->priv; return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); } +EXPORT_SYMBOL(b53_fdb_del); static int b53_arl_search_wait(struct b53_device *dev) { @@ -1258,9 +1267,9 @@ static int b53_fdb_copy(struct net_device *dev, int port, return cb(&fdb->obj); } -static int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) +int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) { struct b53_device *priv = ds->priv; struct net_device *dev = ds->ports[port].netdev; @@ -1297,9 +1306,9 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_fdb_dump); -static int b53_br_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; s8 cpu_port = ds->dst->cpu_port; @@ -1317,11 +1326,10 @@ static int b53_br_join(struct dsa_switch *ds, int port, b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); } - dev->ports[port].bridge_dev = bridge; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); b53_for_each_port(dev, i) { - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this local port to the remote port VLAN control @@ -1343,11 +1351,11 @@ static int b53_br_join(struct dsa_switch *ds, int port, return 0; } +EXPORT_SYMBOL(b53_br_join); -static void b53_br_leave(struct dsa_switch *ds, int port) +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - struct net_device *bridge = dev->ports[port].bridge_dev; struct b53_vlan *vl = &dev->vlans[0]; s8 cpu_port = ds->dst->cpu_port; unsigned int i; @@ -1357,7 +1365,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port) b53_for_each_port(dev, i) { /* Don't touch the remaining ports */ - if (dev->ports[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); @@ -1372,7 +1380,6 @@ static void b53_br_leave(struct dsa_switch *ds, int port) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - dev->ports[port].bridge_dev = NULL; if (is5325(dev) || is5365(dev)) pvid = 1; @@ -1393,8 +1400,9 @@ static void b53_br_leave(struct dsa_switch *ds, int port) b53_set_vlan_entry(dev, pvid, vl); } } +EXPORT_SYMBOL(b53_br_leave); -static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) +void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) { struct b53_device *dev = ds->priv; u8 hw_state; @@ -1426,21 +1434,88 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) reg |= hw_state; b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); } +EXPORT_SYMBOL(b53_br_set_stp_state); -static void b53_br_fast_age(struct dsa_switch *ds, int port) +void b53_br_fast_age(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; if (b53_fast_age_port(dev, port)) dev_err(ds->dev, "fast ageing failed\n"); } +EXPORT_SYMBOL(b53_br_fast_age); static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) { return DSA_TAG_PROTO_NONE; } -static struct dsa_switch_ops b53_switch_ops = { +int b53_mirror_add(struct 
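/* ---- Editor's note ----------------------------------------------------
 * With this series the bridge membership pointer lives in the DSA core
 * (ds->ports[i].bridge_dev) rather than in each driver's private per-port
 * state, which is why b53_br_join()/b53_br_leave() above iterate core
 * state. Minimal sketch of the membership scan under that assumption
 * (ports_in_bridge() is a hypothetical helper):
 */
#include <linux/bitops.h>
#include <net/dsa.h>

static u16 ports_in_bridge(struct dsa_switch *ds, struct net_device *br)
{
	u16 mask = 0;
	int i;

	for (i = 0; i < DSA_MAX_PORTS; i++)
		if (br && ds->ports[i].bridge_dev == br)
			mask |= BIT(i);	/* port i shares our bridge */

	return mask;
}
/* ----------------------------------------------------------------------- */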
dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress) +{ + struct b53_device *dev = ds->priv; + u16 reg, loc; + + if (ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~MIRROR_MASK; + reg |= BIT(port); + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + reg &= ~CAP_PORT_MASK; + reg |= mirror->to_local_port; + reg |= MIRROR_EN; + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); + + return 0; +} +EXPORT_SYMBOL(b53_mirror_add); + +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct b53_device *dev = ds->priv; + bool loc_disable = false, other_loc_disable = false; + u16 reg, loc; + + if (mirror->ingress) + loc = B53_IG_MIR_CTL; + else + loc = B53_EG_MIR_CTL; + + /* Update the desired ingress/egress register */ + b53_read16(dev, B53_MGMT_PAGE, loc, ®); + reg &= ~BIT(port); + if (!(reg & MIRROR_MASK)) + loc_disable = true; + b53_write16(dev, B53_MGMT_PAGE, loc, reg); + + /* Now look at the other one to know if we can disable mirroring + * entirely + */ + if (mirror->ingress) + b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, ®); + else + b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, ®); + if (!(reg & MIRROR_MASK)) + other_loc_disable = true; + + b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, ®); + /* Both no longer have ports, let's disable mirroring */ + if (loc_disable && other_loc_disable) { + reg &= ~MIRROR_EN; + reg &= ~mirror->to_local_port; + } + b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg); +} +EXPORT_SYMBOL(b53_mirror_del); + +static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, .get_strings = b53_get_strings, @@ -1464,6 +1539,8 @@ static struct dsa_switch_ops b53_switch_ops = { .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, }; struct b53_chip_data { @@ -1672,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM7278_DEVICE_ID, + .dev_name = "BCM7278", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries= 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, }; static int b53_switch_init(struct b53_device *dev) @@ -1765,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base, struct dsa_switch *ds; struct b53_device *dev; - ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); + ds = dsa_switch_alloc(base, DSA_MAX_PORTS); if (!ds) return NULL; - dev = (struct b53_device *)(ds + 1); + dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; ds->priv = dev; - ds->dev = base; dev->dev = base; dev->ds = ds; @@ -1869,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev) pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); - return dsa_register_switch(dev->ds, dev->ds->dev->of_node); + return dsa_register_switch(dev->ds, dev->ds->dev); } EXPORT_SYMBOL(b53_switch_register); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 477a16b5660a..fa7556f5d4fb 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -375,18 +375,7 @@ static struct 
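/* ---- Editor's note ----------------------------------------------------
 * b53_mirror_del() above may only clear MIRROR_EN once *both* the ingress
 * and egress mirror masks are empty. Condensed sketch of that decision;
 * register access is abstracted away and the EX_* names are hypothetical
 * stand-ins for the b53_regs.h definitions:
 */
#include <linux/types.h>
#include <linux/bitops.h>

#define EX_MIRROR_MASK	0x1ff		/* per-port bits, as MIRROR_MASK */
#define EX_MIRROR_EN	BIT(15)		/* global enable, as MIRROR_EN */

static u16 mirror_capture_update(u16 ig_ctl, u16 eg_ctl, u16 cap_ctl)
{
	/* Disable capturing only when no port mirrors in either direction */
	if (!(ig_ctl & EX_MIRROR_MASK) && !(eg_ctl & EX_MIRROR_MASK))
		cap_ctl &= ~EX_MIRROR_EN;

	return cap_ctl;
}
/* ----------------------------------------------------------------------- */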
mdio_driver b53_mdio_driver = { .of_match_table = b53_of_match, }, }; - -static int __init b53_mdio_driver_register(void) -{ - return mdio_driver_register(&b53_mdio_driver); -} -module_init(b53_mdio_driver_register); - -static void __exit b53_mdio_driver_unregister(void) -{ - mdio_driver_unregister(&b53_mdio_driver); -} -module_exit(b53_mdio_driver_unregister); +mdio_module_driver(b53_mdio_driver); MODULE_DESCRIPTION("B53 MDIO access driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index f192a673caba..a9dc90a01438 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/phy.h> +#include <linux/etherdevice.h> #include <net/dsa.h> #include "b53_regs.h" @@ -61,6 +62,7 @@ enum { BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, BCM7445_DEVICE_ID = 0x7445, + BCM7278_DEVICE_ID = 0x7278, }; #define B53_N_PORTS 9 @@ -68,7 +70,6 @@ enum { struct b53_port { u16 vlan_ctl_mask; - struct net_device *bridge_dev; }; struct b53_vlan { @@ -178,7 +179,8 @@ static inline int is5301x(struct b53_device *dev) static inline int is58xx(struct b53_device *dev) { return dev->chip_id == BCM58XX_DEVICE_ID || - dev->chip_id == BCM7445_DEVICE_ID; + dev->chip_id == BCM7445_DEVICE_ID || + dev->chip_id == BCM7278_DEVICE_ID; } #define B53_CPU_PORT_25 5 @@ -325,25 +327,6 @@ struct b53_arl_entry { u8 is_static:1; }; -static inline void b53_mac_from_u64(u64 src, u8 *dst) -{ - unsigned int i; - - for (i = 0; i < ETH_ALEN; i++) - dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; -} - -static inline u64 b53_mac_to_u64(const u8 *src) -{ - unsigned int i; - u64 dst = 0; - - for (i = 0; i < ETH_ALEN; i++) - dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); - - return dst; -} - static inline void b53_arl_to_entry(struct b53_arl_entry *ent, u64 mac_vid, u32 fwd_entry) { @@ -352,14 +335,14 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent, ent->is_valid = !!(fwd_entry & ARLTBL_VALID); ent->is_age = !!(fwd_entry & ARLTBL_AGE); ent->is_static = !!(fwd_entry & ARLTBL_STATIC); - b53_mac_from_u64(mac_vid, ent->mac); + u64_to_ether_addr(mac_vid, ent->mac); ent->vid = mac_vid >> ARLTBL_VID_S; } static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, const struct b53_arl_entry *ent) { - *mac_vid = b53_mac_to_u64(ent->mac); + *mac_vid = ether_addr_to_u64(ent->mac); *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK; if (ent->is_valid) @@ -392,4 +375,41 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev) return -ENOENT; } #endif + +/* Exported functions towards other drivers */ +void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); +void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); +int b53_get_sset_count(struct dsa_switch *ds); +int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); +void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); +void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); +void b53_br_fast_age(struct dsa_switch *ds, int port); +int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); +int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); +void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + 
struct switchdev_trans *trans); +int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)); +int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); +void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); +int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb); +int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); +int b53_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, bool ingress); +void b53_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror); + #endif diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index dac0af4e2cd0..9fd24c418fa4 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -206,6 +206,38 @@ #define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ #define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ +/* Mirror capture control register (16 bit) */ +#define B53_MIR_CAP_CTL 0x10 +#define CAP_PORT_MASK 0xf +#define BLK_NOT_MIR BIT(14) +#define MIRROR_EN BIT(15) + +/* Ingress mirror control register (16 bit) */ +#define B53_IG_MIR_CTL 0x12 +#define MIRROR_MASK 0x1ff +#define DIV_EN BIT(13) +#define MIRROR_FILTER_MASK 0x3 +#define MIRROR_FILTER_SHIFT 14 +#define MIRROR_ALL 0 +#define MIRROR_DA 1 +#define MIRROR_SA 2 + +/* Ingress mirror divider register (16 bit) */ +#define B53_IG_MIR_DIV 0x14 +#define IN_MIRROR_DIV_MASK 0x3ff + +/* Ingress mirror MAC address register (48 bit) */ +#define B53_IG_MIR_MAC 0x16 + +/* Egress mirror control register (16 bit) */ +#define B53_EG_MIR_CTL 0x1C + +/* Egress mirror divider register (16 bit) */ +#define B53_EG_MIR_DIV 0x1E + +/* Egress mirror MAC address register (48 bit) */ +#define B53_EG_MIR_MAC 0x20 + /* Device ID register (8 or 32 bit) */ #define B53_DEVICE_ID 0x30 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 2ce7ae97ac91..2be963252ca5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) } } -static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg, val; - /* Enable the port memories */ - reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); - reg &= ~P_TXQ_PSM_VDD(port); - core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - - /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ - reg = core_readl(priv, CORE_IMP_CTL); - reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); - reg &= ~(RX_DIS | TX_DIS); - core_writel(priv, reg, CORE_IMP_CTL); - - /* Enable forwarding */ - core_writel(priv, SW_FWDG_EN, CORE_SWMODE); - - /* Enable IMP port in dumb mode */ - reg = core_readl(priv, CORE_SWITCH_CTRL); - reg |= MII_DUMB_FWDG_EN; - core_writel(priv, reg, CORE_SWITCH_CTRL); - /* Resolve which bit controls the Broadcom tag */ switch (port) { case 8: @@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); reg &= ~(1 << port); 
core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); +} + +static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; + + /* Enable the port memories */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + + /* Enable forwarding */ + core_writel(priv, SW_FWDG_EN, CORE_SWMODE); + + /* Enable IMP port in dumb mode */ + reg = core_readl(priv, CORE_SWITCH_CTRL); + reg |= MII_DUMB_FWDG_EN; + core_writel(priv, reg, CORE_SWITCH_CTRL); + + bcm_sf2_brcm_hdr_setup(priv, port); /* Force link status for IMP port */ - reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); + reg = core_readl(priv, offset); reg |= (MII_SW_OR | LINK_STS); - core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); + core_writel(priv, reg, offset); } static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) @@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; + unsigned int i; u32 reg; /* Clear the memory power down */ @@ -224,6 +237,18 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + /* Enable Broadcom tags for that port if requested */ + if (priv->brcm_tag_mask & BIT(port)) + bcm_sf2_brcm_hdr_setup(priv, port); + + /* Configure Traffic Class to QoS mapping, allow each priority to map + * to a different queue number + */ + reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); + for (i = 0; i < 8; i++) + reg |= i << (PRT_TO_QID_SHIFT * i); + core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); + /* Clear the Rx and Tx disable bits and set to no spanning tree */ core_writel(priv, 0, CORE_G_PCTL_PORT(port)); @@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, if (mode == PHY_INTERFACE_MODE_MOCA) priv->moca_port = port_num; + + if (of_property_read_bool(port, "brcm,use-bcm-hdr")) + priv->brcm_tag_mask |= 1 << port_num; } } @@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct ethtool_eee *p = &priv->port_sts[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; - u32 reg; + u32 reg, offset; + + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); switch (phydev->interface) { case PHY_INTERFACE_MODE_RGMII: @@ -662,7 +695,7 @@ force_link: if (phydev->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if (!phydev->is_pseudo_fixed_link) p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); @@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 duplex, pause; + u32 duplex, pause, offset; u32 reg; + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + 
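/* ---- Editor's note ----------------------------------------------------
 * The TC-to-QoS setup above packs eight 3-bit queue numbers into one
 * register, field i at bit offset PRT_TO_QID_SHIFT * i, yielding an
 * identity priority->queue map. Worked sketch of the computed value
 * (EX_PRT_TO_QID_SHIFT mirrors the PRT_TO_QID_SHIFT from the patch):
 */
#include <linux/types.h>

#define EX_PRT_TO_QID_SHIFT	3	/* 3 bits per traffic class */

static u32 tc2qos_identity_map(void)
{
	u32 reg = 0;
	unsigned int i;

	for (i = 0; i < 8; i++)
		reg |= i << (EX_PRT_TO_QID_SHIFT * i);

	return reg;	/* 0x00fac688: class 1 -> queue 1, class 2 -> queue 2, ... */
}
/* ----------------------------------------------------------------------- */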
duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); @@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, status->duplex = !!(duplex & (1 << port)); } - reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + reg = core_readl(priv, offset); reg |= SW_OVERRIDE; if (status->link) reg |= LINK_STS; else reg &= ~LINK_STS; - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + core_writel(priv, reg, offset); if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { @@ -977,10 +1015,110 @@ static struct b53_io_ops bcm_sf2_io_ops = { .write64 = bcm_sf2_core_write64, }; +static const struct dsa_switch_ops bcm_sf2_ops = { + .get_tag_protocol = bcm_sf2_sw_get_tag_protocol, + .setup = bcm_sf2_sw_setup, + .get_strings = b53_get_strings, + .get_ethtool_stats = b53_get_ethtool_stats, + .get_sset_count = b53_get_sset_count, + .get_phy_flags = bcm_sf2_sw_get_phy_flags, + .adjust_link = bcm_sf2_sw_adjust_link, + .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .suspend = bcm_sf2_sw_suspend, + .resume = bcm_sf2_sw_resume, + .get_wol = bcm_sf2_sw_get_wol, + .set_wol = bcm_sf2_sw_set_wol, + .port_enable = bcm_sf2_port_setup, + .port_disable = bcm_sf2_port_disable, + .get_eee = bcm_sf2_sw_get_eee, + .set_eee = bcm_sf2_sw_set_eee, + .port_bridge_join = b53_br_join, + .port_bridge_leave = b53_br_leave, + .port_stp_state_set = b53_br_set_stp_state, + .port_fast_age = b53_br_fast_age, + .port_vlan_filtering = b53_vlan_filtering, + .port_vlan_prepare = b53_vlan_prepare, + .port_vlan_add = b53_vlan_add, + .port_vlan_del = b53_vlan_del, + .port_vlan_dump = b53_vlan_dump, + .port_fdb_prepare = b53_fdb_prepare, + .port_fdb_dump = b53_fdb_dump, + .port_fdb_add = b53_fdb_add, + .port_fdb_del = b53_fdb_del, + .get_rxnfc = bcm_sf2_get_rxnfc, + .set_rxnfc = bcm_sf2_set_rxnfc, + .port_mirror_add = b53_mirror_add, + .port_mirror_del = b53_mirror_del, +}; + +struct bcm_sf2_of_data { + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; +}; + +/* Register offsets for the SWITCH_REG_* block */ +static const u16 bcm_sf2_7445_reg_offsets[] = { + [REG_SWITCH_CNTRL] = 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 0x08, + [REG_DIR_DATA_READ] = 0x0C, + [REG_SWITCH_REVISION] = 0x18, + [REG_PHY_REVISION] = 0x1C, + [REG_SPHY_CNTRL] = 0x2C, + [REG_RGMII_0_CNTRL] = 0x34, + [REG_RGMII_1_CNTRL] = 0x40, + [REG_RGMII_2_CNTRL] = 0x4c, + [REG_LED_0_CNTRL] = 0x90, + [REG_LED_1_CNTRL] = 0x94, + [REG_LED_2_CNTRL] = 0x98, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7445_data = { + .type = BCM7445_DEVICE_ID, + .core_reg_align = 0, + .reg_offsets = bcm_sf2_7445_reg_offsets, +}; + +static const u16 bcm_sf2_7278_reg_offsets[] = { + [REG_SWITCH_CNTRL] = 0x00, + [REG_SWITCH_STATUS] = 0x04, + [REG_DIR_DATA_WRITE] = 0x08, + [REG_DIR_DATA_READ] = 0x0c, + [REG_SWITCH_REVISION] = 0x10, + [REG_PHY_REVISION] = 0x14, + [REG_SPHY_CNTRL] = 0x24, + [REG_RGMII_0_CNTRL] = 0xe0, + [REG_RGMII_1_CNTRL] = 0xec, + [REG_RGMII_2_CNTRL] = 0xf8, + [REG_LED_0_CNTRL] = 0x40, + [REG_LED_1_CNTRL] = 0x4c, + [REG_LED_2_CNTRL] = 0x58, +}; + +static const struct bcm_sf2_of_data bcm_sf2_7278_data = { + .type = BCM7278_DEVICE_ID, + .core_reg_align = 1, + .reg_offsets = bcm_sf2_7278_reg_offsets, +}; + +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0", + .data = &bcm_sf2_7445_data + }, + { .compatible = "brcm,bcm7278-switch-v4.0", + .data = &bcm_sf2_7278_data + }, + { /* sentinel */ }, +}; 
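/* ---- Editor's note ----------------------------------------------------
 * With the per-SoC tables above, each REG_* name is now an index into a
 * u16 offset table instead of a raw offset, letting 7445 and 7278 share
 * one driver. Minimal sketch of the indirection (EX_* names hypothetical):
 */
#include <linux/io.h>
#include <linux/types.h>

enum { EX_REG_CNTRL, EX_REG_STATUS, EX_REG_MAX };

static const u16 ex_soc_offsets[EX_REG_MAX] = {
	[EX_REG_CNTRL]	= 0x00,
	[EX_REG_STATUS]	= 0x04,
};

static u32 ex_reg_read(void __iomem *base, const u16 *offsets,
		       unsigned int reg)
{
	return __raw_readl(base + offsets[reg]);	/* index, then offset */
}
/* ----------------------------------------------------------------------- */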
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); + static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; struct bcm_sf2_priv *priv; @@ -1008,42 +1146,38 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; + of_id = of_match_node(bcm_sf2_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + data = of_id->data; + + /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */ + priv->type = data->type; + priv->reg_offsets = data->reg_offsets; + priv->core_reg_align = data->core_reg_align; + /* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for * b53_common to work with */ - pdata->chip_id = BCM7445_DEVICE_ID; + pdata->chip_id = priv->type; dev->pdata = pdata; priv->dev = dev; ds = dev->ds; - - /* Override the parts that are non-standard wrt. normal b53 devices */ - memcpy(ops, ds->ops, sizeof(*ops)); - ds->ops = ops; - ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; - ds->ops->setup = bcm_sf2_sw_setup; - ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; - ds->ops->adjust_link = bcm_sf2_sw_adjust_link; - ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update; - ds->ops->suspend = bcm_sf2_sw_suspend; - ds->ops->resume = bcm_sf2_sw_resume; - ds->ops->get_wol = bcm_sf2_sw_get_wol; - ds->ops->set_wol = bcm_sf2_sw_set_wol; - ds->ops->port_enable = bcm_sf2_port_setup; - ds->ops->port_disable = bcm_sf2_port_disable; - ds->ops->get_eee = bcm_sf2_sw_get_eee; - ds->ops->set_eee = bcm_sf2_sw_set_eee; - - /* Avoid having DSA free our slave MDIO bus (checking for - * ds->slave_mii_bus and ds->ops->phy_read being non-NULL) - */ - ds->ops->phy_read = NULL; + ds->ops = &bcm_sf2_ops; dev_set_drvdata(&pdev->dev, priv); spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); + mutex_init(&priv->cfp.lock); + + /* CFP rule #0 cannot be used for specific classifications, flag it as + * permanently used + */ + set_bit(0, priv->cfp.used); bcm_sf2_identify_ports(priv, dn->child); @@ -1073,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + ret = bcm_sf2_cfp_rst(priv); + if (ret) { + pr_err("failed to reset CFP\n"); + goto out_mdio; + } + /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); @@ -1179,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, bcm_sf2_suspend, bcm_sf2_resume); -static const struct of_device_id bcm_sf2_of_match[] = { - { .compatible = "brcm,bcm7445-switch-v4.0" }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 44692673e1d5..7d3030e04f11 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -52,6 +52,13 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; }; +struct bcm_sf2_cfp_priv { + /* Mutex protecting concurrent accesses to the CFP registers */ + struct mutex lock; + DECLARE_BITMAP(used, CFP_NUM_RULES); + unsigned int rules_cnt; +}; + struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -61,6 +68,11 @@ struct bcm_sf2_priv { void __iomem *fcb; void __iomem 
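/* ---- Editor's note ----------------------------------------------------
 * The probe path above resolves the per-SoC parameters through
 * of_match_node() and the .data pointer of the matched entry. Sketch of
 * that lookup shape, with a trimmed hypothetical stand-in for
 * bcm_sf2_of_data:
 */
#include <linux/of.h>

struct ex_of_data {
	u32 type;	/* stand-in for the full bcm_sf2_of_data */
};

static const struct ex_of_data *ex_get_match_data(const struct of_device_id *tbl,
						  struct device_node *dn)
{
	const struct of_device_id *of_id = of_match_node(tbl, dn);

	/* Reject device trees matching no known compatible string */
	if (!of_id || !of_id->data)
		return NULL;

	return of_id->data;
}
/* ----------------------------------------------------------------------- */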
*acb; + /* Register offsets indirection tables */ + u32 type; + const u16 *reg_offsets; + unsigned int core_reg_align; + /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; @@ -95,6 +107,12 @@ struct bcm_sf2_priv { struct device_node *master_mii_dn; struct mii_bus *slave_mii_bus; struct mii_bus *master_mii_bus; + + /* Bitmask of ports needing BRCM tags */ + unsigned int brcm_tag_mask; + + /* CFP rules context */ + struct bcm_sf2_cfp_priv cfp; }; static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) @@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) return dev->priv; } +static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off) +{ + return off << priv->core_reg_align; +} + #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ { \ @@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ { \ u32 indir, dir; \ spin_lock(&priv->indir_lock); \ - dir = __raw_readl(priv->name + off); \ + dir = name##_readl(priv, off); \ indir = reg_readl(priv, REG_DIR_DATA_READ); \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ @@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ { \ spin_lock(&priv->indir_lock); \ reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ - __raw_writel(lower_32_bits(val), priv->name + off); \ + name##_writel(priv, lower_32_bits(val), off); \ spin_unlock(&priv->indir_lock); \ } @@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ priv->irq##which##_mask |= (mask); \ } \ -SF2_IO_MACRO(core); -SF2_IO_MACRO(reg); +static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + return __raw_readl(priv->core + tmp); +} + +static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off) +{ + u32 tmp = bcm_sf2_mangle_addr(priv, off); + __raw_writel(val, priv->core + tmp); +} + +static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off) +{ + return __raw_readl(priv->reg + priv->reg_offsets[off]); +} + +static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off) +{ + __raw_writel(val, priv->reg + priv->reg_offsets[off]); +} + SF2_IO64_MACRO(core); SF2_IO_MACRO(intrl2_0); SF2_IO_MACRO(intrl2_1); @@ -164,4 +207,11 @@ SF2_IO_MACRO(acb); SWITCH_INTR_L2(0); SWITCH_INTR_L2(1); +/* RXNFC */ +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs); +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc); +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); + #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c new file mode 100644 index 000000000000..c71be3e0dc2d --- /dev/null +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -0,0 +1,613 @@ +/* + * Broadcom Starfighter 2 DSA switch CFP support + * + * Copyright (C) 2016, Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/list.h> +#include <net/dsa.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/bitmap.h> + +#include "bcm_sf2.h" +#include "bcm_sf2_regs.h" + +struct cfp_udf_layout { + u8 slices[UDF_NUM_SLICES]; + u32 mask_value; + +}; + +/* UDF slices layout for a TCPv4/UDPv4 specification */ +static const struct cfp_udf_layout udf_tcpip4_layout = { + .slices = { + /* End of L2, byte offset 12, src IP[0:15] */ + CFG_UDF_EOL2 | 6, + /* End of L2, byte offset 14, src IP[16:31] */ + CFG_UDF_EOL2 | 7, + /* End of L2, byte offset 16, dst IP[0:15] */ + CFG_UDF_EOL2 | 8, + /* End of L2, byte offset 18, dst IP[16:31] */ + CFG_UDF_EOL2 | 9, + /* End of L3, byte offset 0, src port */ + CFG_UDF_EOL3 | 0, + /* End of L3, byte offset 2, dst port */ + CFG_UDF_EOL3 | 1, + 0, 0, 0 + }, + .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG, +}; + +static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout) +{ + unsigned int i, count = 0; + + for (i = 0; i < UDF_NUM_SLICES; i++) { + if (layout[i] != 0) + count++; + } + + return count; +} + +static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv, + unsigned int slice_num, + const u8 *layout) +{ + u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET; + unsigned int i; + + for (i = 0; i < UDF_NUM_SLICES; i++) + core_writel(priv, layout[i], offset + i * 4); +} + +static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_STR_DONE | op; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & OP_STR_DONE)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, + unsigned int addr) +{ + u32 reg; + + WARN_ON(addr >= CFP_NUM_RULES); + + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= addr << XCESS_ADDR_SHIFT; + core_writel(priv, reg, CORE_CFP_ACC); +} + +static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) +{ + /* Entry #0 is reserved */ + return CFP_NUM_RULES - 1; +} + +static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_tcpip4_spec *v4_spec; + const struct cfp_udf_layout *layout; + unsigned int slice_num, rule_index; + unsigned int queue_num, port_num; + u8 ip_proto, ip_frag; + u8 num_udf; + u32 reg; + int ret; + + /* Check for unsupported extensions */ + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_etype || fs->m_ext.data[1])) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + ip_frag = be32_to_cpu(fs->m_ext.data[0]); + + /* We do not support discarding packets, check that the + * destination port is enabled and that we are within the + * number of ports supported by the switch + */ + port_num = fs->ring_cookie / 8; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC || + !(BIT(port_num) & ds->enabled_port_mask) || + port_num >= priv->hw_params.num_ports) + return -EINVAL; + + switch (fs->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ip_proto = IPPROTO_TCP; + v4_spec = &fs->h_u.tcp_ip4_spec; + 
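/* ---- Editor's note ----------------------------------------------------
 * bcm_sf2_cfp_op() above is the usual "kick the operation, then poll a
 * self-clearing bit with a bounded spin" pattern. Generic sketch of the
 * polling half; the accessor callback is hypothetical and cpu_relax()
 * comes from <asm/processor.h>:
 */
#include <linux/errno.h>
#include <linux/types.h>

static int ex_poll_self_clearing(u32 (*read_acc)(void), u32 done_bit)
{
	unsigned int timeout = 1000;

	do {
		if (!(read_acc() & done_bit))
			return 0;	/* hardware cleared the bit */
		cpu_relax();
	} while (timeout--);

	return -ETIMEDOUT;	/* bounded: never spins forever */
}
/* ----------------------------------------------------------------------- */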
break; + case UDP_V4_FLOW: + ip_proto = IPPROTO_UDP; + v4_spec = &fs->h_u.udp_ip4_spec; + break; + default: + return -EINVAL; + } + + /* We only use one UDF slice for now */ + slice_num = 1; + layout = &udf_tcpip4_layout; + num_udf = bcm_sf2_get_num_udf_slices(layout->slices); + + /* Apply the UDF layout for this filter */ + bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices); + + /* Apply to all packets received through this port */ + core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7)); + + /* S-Tag status [31:30] + * C-Tag status [29:28] + * L2 framing [27:26] + * L3 framing [25:24] + * IP ToS [23:16] + * IP proto [15:08] + * IP Fragm [7] + * Non 1st frag [6] + * IP Authen [5] + * TTL range [4:3] + * PPPoE session [2] + * Reserved [1] + * UDF_Valid[8] [0] + */ + core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7, + CORE_CFP_DATA_PORT(6)); + + /* UDF_Valid[7:0] [31:24] + * S-Tag [23:8] + * C-Tag [7:0] + */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5)); + + /* C-Tag [31:24] + * UDF_n_A8 [23:8] + * UDF_n_A7 [7:0] + */ + core_writel(priv, 0, CORE_CFP_DATA_PORT(4)); + + /* UDF_n_A7 [31:24] + * UDF_n_A6 [23:8] + * UDF_n_A5 [7:0] + */ + core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8, + CORE_CFP_DATA_PORT(3)); + + /* UDF_n_A5 [31:24] + * UDF_n_A4 [23:8] + * UDF_n_A3 [7:0] + */ + reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | + (u32)be16_to_cpu(v4_spec->psrc) << 8 | + (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(2)); + + /* UDF_n_A3 [31:24] + * UDF_n_A2 [23:8] + * UDF_n_A1 [7:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | + (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; + core_writel(priv, reg, CORE_CFP_DATA_PORT(1)); + + /* UDF_n_A1 [31:24] + * UDF_n_A0 [23:8] + * Reserved [7:4] + * Slice ID [3:2] + * Slice valid [1:0] + */ + reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | + (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | + SLICE_NUM(slice_num) | SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Source port map match */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7)); + + /* Mask with the specific layout for IPv4 packets */ + core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6)); + + /* Mask all but valid UDFs */ + core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5)); + + /* Mask all */ + core_writel(priv, 0, CORE_CFP_MASK_PORT(4)); + + /* All other UDFs should be matched with the filter */ + core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2)); + core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1)); + core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0)); + + /* Locate the first rule available */ + if (fs->location == RX_CLS_LOC_ANY) + rule_index = find_first_zero_bit(priv->cfp.used, + bcm_sf2_cfp_rule_size(priv)); + else + rule_index = fs->location; + + /* Insert into TCAM now */ + bcm_sf2_cfp_rule_addr_set(priv, rule_index); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) { + pr_err("TCAM entry at addr %d failed\n", rule_index); + return ret; + } + + /* Replace ARL derived destination with DST_MAP derived, define + * which port and queue this should be forwarded to. + * + * We have a small oddity where Port 6 just does not have a + * valid bit here (so we subtract by one). 
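 * [Editor's note, worked example: ring_cookie encodes port * 8 + queue,
 *  so ring_cookie 18 selects port 2, queue 2 (18 / 8 = 2, 18 % 8 = 2),
 *  while ring_cookie 58 selects port 7, queue 2, which the quirk above
 *  rewrites to DST_MAP bit 6 (7 - 1 = 6).]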
+ */ + queue_num = fs->ring_cookie % 8; + if (port_num >= 7) + port_num -= 1; + + reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) | + CHANGE_TC | queue_num << NEW_TC_SHIFT; + + core_writel(priv, reg, CORE_ACT_POL_DATA0); + + /* Set classification ID that needs to be put in Broadcom tag */ + core_writel(priv, rule_index << CHAIN_ID_SHIFT, + CORE_ACT_POL_DATA1); + + core_writel(priv, 0, CORE_ACT_POL_DATA2); + + /* Configure policer RAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM); + if (ret) { + pr_err("Policer entry at %d failed\n", rule_index); + return ret; + } + + /* Disable the policer */ + core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0); + + /* Now the rate meter */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM); + if (ret) { + pr_err("Meter entry at %d failed\n", rule_index); + return ret; + } + + /* Turn on CFP for this rule now */ + reg = core_readl(priv, CORE_CFP_CTL_REG); + reg |= BIT(port); + core_writel(priv, reg, CORE_CFP_CTL_REG); + + /* Flag the rule as being used and return it */ + set_bit(rule_index, priv->cfp.used); + fs->location = rule_index; + + return 0; +} + +static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, + u32 loc) +{ + int ret; + u32 reg; + + /* Refuse deletion of unused rules, and the default reserved rule */ + if (!test_bit(loc, priv->cfp.used) || loc == 0) + return -EINVAL; + + /* Indicate which rule we want to read */ + bcm_sf2_cfp_rule_addr_set(priv, loc); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + + /* Clear its valid bits */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + reg &= ~SLICE_VALID; + core_writel(priv, reg, CORE_CFP_DATA_PORT(0)); + + /* Write back this entry into the TCAM now */ + ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); + if (ret) + return ret; + + clear_bit(loc, priv->cfp.used); + + return 0; +} + +static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) +{ + unsigned int i; + + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xff; + + flow->m_ext.vlan_etype ^= cpu_to_be16(~0); + flow->m_ext.vlan_tci ^= cpu_to_be16(~0); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); +} + +static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, + struct ethtool_rxnfc *nfc, bool search) +{ + struct ethtool_tcpip4_spec *v4_spec; + unsigned int queue_num; + u16 src_dst_port; + u32 reg, ipv4; + int ret; + + if (!search) { + bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); + if (ret) + return ret; + + reg = core_readl(priv, CORE_ACT_POL_DATA0); + + ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); + if (ret) + return ret; + } else { + reg = core_readl(priv, CORE_ACT_POL_DATA0); + } + + /* Extract the destination port */ + nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & + DST_MAP_IB_MASK) - 1; + + /* There is no Port 6, so we compensate for that here */ + if (nfc->fs.ring_cookie >= 6) + nfc->fs.ring_cookie++; + nfc->fs.ring_cookie *= 8; + + /* Extract the destination queue */ + queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; + nfc->fs.ring_cookie += queue_num; + + /* Extract the IP protocol */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); + switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { + case IPPROTO_TCP: + nfc->fs.flow_type = TCP_V4_FLOW; + v4_spec = &nfc->fs.h_u.tcp_ip4_spec; + break; + case IPPROTO_UDP: + nfc->fs.flow_type = UDP_V4_FLOW; + v4_spec = &nfc->fs.h_u.udp_ip4_spec; + 
break; + default: + /* Clear to exit the search process */ + if (search) + core_readl(priv, CORE_CFP_DATA_PORT(7)); + return -EINVAL; + } + + v4_spec->tos = (reg >> 16) & IPPROTO_MASK; + nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1); + + reg = core_readl(priv, CORE_CFP_DATA_PORT(3)); + /* src port [15:8] */ + src_dst_port = reg << 8; + + reg = core_readl(priv, CORE_CFP_DATA_PORT(2)); + /* src port [7:0] */ + src_dst_port |= (reg >> 24); + + v4_spec->pdst = cpu_to_be16(src_dst_port); + nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); + nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + + /* IPv4 dst [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(1)); + /* IPv4 dst [31:16] */ + ipv4 |= (u32)((reg >> 8) & 0xffffff) << 16; + /* IPv4 dst [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + v4_spec->ip4dst = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + + /* IPv4 src [15:8] */ + ipv4 = (u16)(reg & 0xff) << 8; + reg = core_readl(priv, CORE_CFP_DATA_PORT(0)); + + if (!(reg & SLICE_VALID)) + return -EINVAL; + + /* IPv4 src [7:0] */ + ipv4 |= (reg >> 24) & 0xff; + /* IPv4 src [31:16] */ + ipv4 |= ((reg >> 8) & 0xffffff) << 16; + v4_spec->ip4src = cpu_to_be32(ipv4); + nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + + /* Read last to avoid next entry clobbering the results during search + * operations + */ + reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); + if (!(reg & 1 << port)) + return -EINVAL; + + bcm_sf2_invert_masks(&nfc->fs); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + + return 0; +} + +/* We implement the search doing a TCAM search operation */ +static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, + int port, struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + unsigned int index = 1, rules_cnt = 0; + int ret; + u32 reg; + + /* Do not poll on OP_STR_DONE to be self-clearing for search + * operations, we cannot use bcm_sf2_cfp_op here because it completes + * on clearing OP_STR_DONE which won't clear until the entire search + * operation is over. 
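 * [Editor's note: put differently, OP_STR_DONE stays asserted while the
 *  TCAM search is still walking entries; polling it via bcm_sf2_cfp_op()
 *  would block until the final rule, so the loop below reads CORE_CFP_ACC
 *  directly and harvests one SEARCH_STS hit per iteration instead.]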
+ */ + reg = core_readl(priv, CORE_CFP_ACC); + reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); + reg |= index << XCESS_ADDR_SHIFT; + reg &= ~(OP_SEL_MASK | RAM_SEL_MASK); + reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + /* Wait for results to be ready */ + reg = core_readl(priv, CORE_CFP_ACC); + + /* Extract the address we are searching */ + index = reg >> XCESS_ADDR_SHIFT; + index &= XCESS_ADDR_MASK; + + /* We have a valid search result, so flag it accordingly */ + if (reg & SEARCH_STS) { + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true); + if (ret) + continue; + + rule_locs[rules_cnt] = index; + rules_cnt++; + } + + /* Search is over break out */ + if (!(reg & OP_STR_DONE)) + break; + + } while (index < CFP_NUM_RULES); + + /* Put the TCAM size here */ + nfc->data = bcm_sf2_cfp_rule_size(priv); + nfc->rule_cnt = rules_cnt; + + return 0; +} + +int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_GRXCLSRLCNT: + /* Subtract the default, unusable rule */ + nfc->rule_cnt = bitmap_weight(priv->cfp.used, + CFP_NUM_RULES) - 1; + /* We support specifying rule locations */ + nfc->data |= RX_CLS_LOC_SPECIAL; + break; + case ETHTOOL_GRXCLSRULE: + ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false); + break; + case ETHTOOL_GRXCLSRLALL: + ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + int ret = 0; + + mutex_lock(&priv->cfp.lock); + + switch (nfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs); + break; + + case ETHTOOL_SRXCLSRLDEL: + ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&priv->cfp.lock); + + return ret; +} + +int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_CFP_ACC); + reg |= TCAM_RESET; + core_writel(priv, reg, CORE_CFP_ACC); + + do { + reg = core_readl(priv, CORE_CFP_ACC); + if (!(reg & TCAM_RESET)) + break; + + cpu_relax(); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 838fe373cd6f..26052450091e 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -12,22 +12,36 @@ #define __BCM_SF2_REGS_H /* Register set relative to 'REG' */ -#define REG_SWITCH_CNTRL 0x00 -#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_STATUS 0x04 -#define REG_DIR_DATA_WRITE 0x08 -#define REG_DIR_DATA_READ 0x0C +enum bcm_sf2_reg_offs { + REG_SWITCH_CNTRL = 0, + REG_SWITCH_STATUS, + REG_DIR_DATA_WRITE, + REG_DIR_DATA_READ, + REG_SWITCH_REVISION, + REG_PHY_REVISION, + REG_SPHY_CNTRL, + REG_RGMII_0_CNTRL, + REG_RGMII_1_CNTRL, + REG_RGMII_2_CNTRL, + REG_LED_0_CNTRL, + REG_LED_1_CNTRL, + REG_LED_2_CNTRL, + REG_SWITCH_REG_MAX, +}; + +/* Relative to REG_SWITCH_CNTRL */ +#define MDIO_MASTER_SEL (1 << 0) -#define REG_SWITCH_REVISION 0x18 +/* Relative to REG_SWITCH_REVISION */ #define SF2_REV_MASK 0xffff #define SWITCH_TOP_REV_SHIFT 16 #define SWITCH_TOP_REV_MASK 0xffff -#define REG_PHY_REVISION 
0x1C +/* Relative to REG_PHY_REVISION */ #define PHY_REVISION_MASK 0xffff -#define REG_SPHY_CNTRL 0x2C +/* Relative to REG_SPHY_CNTRL */ #define IDDQ_BIAS (1 << 0) #define EXT_PWR_DOWN (1 << 1) #define FORCE_DLL_EN (1 << 2) @@ -37,13 +51,8 @@ #define PHY_PHYAD_SHIFT 8 #define PHY_PHYAD_MASK 0x1F -#define REG_RGMII_0_BASE 0x34 -#define REG_RGMII_CNTRL 0x00 -#define REG_RGMII_IB_STATUS 0x04 -#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08 -#define REG_RGMII_CNTRL_SIZE 0x0C -#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \ - ((x) * REG_RGMII_CNTRL_SIZE)) +#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x)) + /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) #define ID_MODE_DIS (1 << 1) @@ -61,8 +70,8 @@ #define LPI_COUNT_SHIFT 9 #define LPI_COUNT_MASK 0x3F -#define REG_LED_CNTRL_BASE 0x90 -#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4) +#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x)) + #define SPDLNK_SRC_SEL (1 << 24) /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ @@ -125,6 +134,9 @@ #define GMII_SPEED_UP_2G (1 << 6) #define MII_SW_OR (1 << 7) +/* Alternate layout for e.g: 7278 */ +#define CORE_STS_OVERRIDE_IMP2 0x39040 + #define CORE_NEW_CTRL 0x00084 #define IP_MC (1 << 0) #define OUTRANGEERR_DISCARD (1 << 1) @@ -142,6 +154,7 @@ #define SW_LEARN_CNTL(x) (1 << (x)) #define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4) +#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8) #define LINK_STS (1 << 0) #define DUPLX_MODE (1 << 1) #define SPEED_SHIFT 2 @@ -225,6 +238,10 @@ #define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ ((x) * P_TXQ_PSM_VDD_SHIFT)) +#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10)) +#define PRT_TO_QID_MASK 0x3 +#define PRT_TO_QID_SHIFT 3 + #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff @@ -238,4 +255,150 @@ #define CORE_EEE_EN_CTRL 0x24800 #define CORE_EEE_LPI_INDICATE 0x24810 +#define CORE_CFP_ACC 0x28000 +#define OP_STR_DONE (1 << 0) +#define OP_SEL_SHIFT 1 +#define OP_SEL_READ (1 << OP_SEL_SHIFT) +#define OP_SEL_WRITE (2 << OP_SEL_SHIFT) +#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT) +#define OP_SEL_MASK (7 << OP_SEL_SHIFT) +#define CFP_RAM_CLEAR (1 << 4) +#define RAM_SEL_SHIFT 10 +#define TCAM_SEL (1 << RAM_SEL_SHIFT) +#define ACT_POL_RAM (2 << RAM_SEL_SHIFT) +#define RATE_METER_RAM (4 << RAM_SEL_SHIFT) +#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT) +#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT) +#define RED_STAT_RAM (24 << RAM_SEL_SHIFT) +#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT) +#define TCAM_RESET (1 << 15) +#define XCESS_ADDR_SHIFT 16 +#define XCESS_ADDR_MASK 0xff +#define SEARCH_STS (1 << 27) +#define RD_STS_SHIFT 28 +#define RD_STS_TCAM (1 << RD_STS_SHIFT) +#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT) +#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT) +#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT) + +#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010 + +#define CORE_CFP_DATA_PORT_0 0x28040 +#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \ + (x) * 0x10) + +/* UDF_DATA7 */ +#define L3_FRAMING_SHIFT 24 +#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT) +#define IPPROTO_SHIFT 8 +#define IPPROTO_MASK (0xff << IPPROTO_SHIFT) +#define IP_FRAG (1 << 7) + +/* UDF_DATA0 */ +#define SLICE_VALID 3 +#define SLICE_NUM_SHIFT 2 +#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT) + +#define CORE_CFP_MASK_PORT_0 0x280c0 + +#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \ + (x) * 0x10) + +#define CORE_ACT_POL_DATA0 0x28140 +#define VLAN_BYP (1 << 0) +#define EAP_BYP (1 << 1) +#define 
STP_BYP (1 << 2) +#define REASON_CODE_SHIFT 3 +#define REASON_CODE_MASK 0x3f +#define LOOP_BK_EN (1 << 9) +#define NEW_TC_SHIFT 10 +#define NEW_TC_MASK 0x7 +#define CHANGE_TC (1 << 13) +#define DST_MAP_IB_SHIFT 14 +#define DST_MAP_IB_MASK 0x1ff +#define CHANGE_FWRD_MAP_IB_SHIFT 24 +#define CHANGE_FWRD_MAP_IB_MASK 0x3 +#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT) +#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT) +#define NEW_DSCP_IB_SHIFT 26 +#define NEW_DSCP_IB_MASK 0x3f + +#define CORE_ACT_POL_DATA1 0x28150 +#define CHANGE_DSCP_IB (1 << 0) +#define DST_MAP_OB_SHIFT 1 +#define DST_MAP_OB_MASK 0x3ff +#define CHANGE_FWRD_MAP_OB_SHIFT 11 +#define CHANGE_FWRD_MAP_OB_MASK 0x3 +#define NEW_DSCP_OB_SHIFT 13 +#define NEW_DSCP_OB_MASK 0x3f +#define CHANGE_DSCP_OB (1 << 19) +#define CHAIN_ID_SHIFT 20 +#define CHAIN_ID_MASK 0xff +#define CHANGE_COLOR (1 << 28) +#define NEW_COLOR_SHIFT 29 +#define NEW_COLOR_MASK 0x3 +#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT) +#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT) +#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT) +#define RED_DEFAULT (1 << 31) + +#define CORE_ACT_POL_DATA2 0x28160 +#define MAC_LIMIT_BYPASS (1 << 0) +#define CHANGE_TC_O (1 << 1) +#define NEW_TC_O_SHIFT 2 +#define NEW_TC_O_MASK 0x7 +#define SPCP_RMK_DISABLE (1 << 5) +#define CPCP_RMK_DISABLE (1 << 6) +#define DEI_RMK_DISABLE (1 << 7) + +#define CORE_RATE_METER0 0x28180 +#define COLOR_MODE (1 << 0) +#define POLICER_ACTION (1 << 1) +#define COUPLING_FLAG (1 << 2) +#define POLICER_MODE_SHIFT 3 +#define POLICER_MODE_MASK 0x3 +#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT) +#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT) +#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT) +#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT) + +#define CORE_RATE_METER1 0x28190 +#define EIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER2 0x281a0 +#define EIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER3 0x281b0 +#define EIR_REF_CNT_MASK 0x7ffff + +#define CORE_RATE_METER4 0x281c0 +#define CIR_TK_BKT_MASK 0x7fffff + +#define CORE_RATE_METER5 0x281d0 +#define CIR_BKT_SIZE_MASK 0xfffff + +#define CORE_RATE_METER6 0x281e0 +#define CIR_REF_CNT_MASK 0x7ffff + +#define CORE_CFP_CTL_REG 0x28400 +#define CFP_EN_MAP_MASK 0x1ff + +/* IPv4 slices, 3 of them */ +#define CORE_UDF_0_A_0_8_PORT_0 0x28440 +#define CFG_UDF_OFFSET_MASK 0x1f +#define CFG_UDF_OFFSET_BASE_SHIFT 5 +#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT) +#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT) + +/* Number of slices for IPv4, IPv6 and non-IP */ +#define UDF_NUM_SLICES 9 + +/* Spacing between different slices */ +#define UDF_SLICE_OFFSET 0x40 + +#define CFP_NUM_RULES 256 + #endif /* __BCM_SF2_REGS_H */ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 7ce36dbd9b62..5934b7a4c448 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -252,7 +252,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return reg_write(ds, addr, regnum, val); } -static struct dsa_switch_ops mv88e6060_switch_ops = { +static const struct dsa_switch_ops mv88e6060_switch_ops = { .get_tag_protocol = mv88e6060_get_tag_protocol, .probe = mv88e6060_drv_probe, .setup = mv88e6060_setup, @@ -261,16 +261,20 @@ static struct dsa_switch_ops mv88e6060_switch_ops 
= { .phy_write = mv88e6060_phy_write, }; +static struct dsa_switch_driver mv88e6060_switch_drv = { + .ops = &mv88e6060_switch_ops, +}; + static int __init mv88e6060_init(void) { - register_switch_driver(&mv88e6060_switch_ops); + register_switch_driver(&mv88e6060_switch_drv); return 0; } module_init(mv88e6060_init); static void __exit mv88e6060_cleanup(void) { - unregister_switch_driver(&mv88e6060_switch_ops); + unregister_switch_driver(&mv88e6060_switch_drv); } module_exit(mv88e6060_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f7222dc6581d..22ce57256d34 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -222,26 +222,62 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) return 0; } +static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) +{ + return mv88e6xxx_read(chip, addr, reg, val); +} + +static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) +{ + return mv88e6xxx_write(chip, addr, reg, val); +} + +static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) +{ + struct mv88e6xxx_mdio_bus *mdio_bus; + + mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus, + list); + if (!mdio_bus) + return NULL; + + return mdio_bus->bus; +} + static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; if (!chip->info->ops->phy_read) return -EOPNOTSUPP; - return chip->info->ops->phy_read(chip, addr, reg, val); + return chip->info->ops->phy_read(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val) { int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; if (!chip->info->ops->phy_write) return -EOPNOTSUPP; - return chip->info->ops->phy_write(chip, addr, reg, val); + return chip->info->ops->phy_write(chip, bus, addr, reg, val); } static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) @@ -611,8 +647,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip) del_timer_sync(&chip->ppu_timer); } -static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 *val) +static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { int err; @@ -625,8 +662,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, return err; } -static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 val) +static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { int err; @@ -664,6 +702,11 @@ static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) return chip->info->family == MV88E6XXX_FAMILY_6320; } +static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6341; +} + static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6351; @@ -1209,8 +1252,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, static int _mv88e6xxx_port_based_vlan_map(struct 
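/* ---- Editor's note ----------------------------------------------------
 * Once dsa_switch_ops becomes const (mv88e6060 above), the legacy
 * register_switch_driver() path needs a separate mutable wrapper,
 * struct dsa_switch_driver, carrying only the ops pointer. Sketch of the
 * registration shape; the example_* names are hypothetical:
 */
#include <linux/module.h>
#include <net/dsa.h>

static enum dsa_tag_protocol example_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_NONE;
}

static const struct dsa_switch_ops example_ops = {
	.get_tag_protocol = example_get_tag_protocol,
	/* .probe, .setup, ... as in mv88e6060 above */
};

static struct dsa_switch_driver example_drv = {
	.ops = &example_ops,	/* ops stay read-only; the wrapper is not */
};

static int __init example_init(void)
{
	register_switch_driver(&example_drv);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_switch_driver(&example_drv);
}
module_exit(example_exit);
/* ----------------------------------------------------------------------- */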
mv88e6xxx_chip *chip, int port) { - struct net_device *bridge = chip->ports[port].bridge_dev; struct dsa_switch *ds = chip->ds; + struct net_device *bridge = ds->ports[port].bridge_dev; u16 output_ports = 0; int i; @@ -1220,7 +1263,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) } else { for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { /* allow sending frames to every group member */ - if (bridge && chip->ports[i].bridge_dev == bridge) + if (bridge && ds->ports[i].bridge_dev == bridge) output_ports |= BIT(i); /* allow sending frames to CPU port and DSA link(s) */ @@ -1688,7 +1731,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || - mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) || + mv88e6xxx_6341_family(chip)) { struct mv88e6xxx_vtu_entry vstp; /* Adding a VTU entry requires a valid STU entry. As VSTP is not @@ -1782,17 +1826,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (chip->ports[i].bridge_dev == - chip->ports[port].bridge_dev) + if (ds->ports[i].bridge_dev == + ds->ports[port].bridge_dev) break; /* same bridge, check next VLAN */ - if (!chip->ports[i].bridge_dev) + if (!ds->ports[i].bridge_dev) continue; netdev_warn(ds->ports[port].netdev, "hardware VLAN %d already used by %s\n", vlan.vid, - netdev_name(chip->ports[i].bridge_dev)); + netdev_name(ds->ports[i].bridge_dev)); err = -EOPNOTSUPP; goto unlock; } @@ -2023,7 +2067,8 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid, struct mv88e6xxx_atu_entry next; int err; - eth_broadcast_addr(next.mac); + memcpy(next.mac, addr, ETH_ALEN); + eth_addr_dec(next.mac); err = _mv88e6xxx_atu_mac_write(chip, next.mac); if (err) @@ -2041,7 +2086,7 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid, *entry = next; return 0; } - } while (!is_broadcast_ether_addr(next.mac)); + } while (ether_addr_greater(addr, next.mac)); memset(entry, 0, sizeof(*entry)); entry->fid = fid; @@ -2281,18 +2326,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; int i, err = 0; mutex_lock(&chip->reg_lock); - /* Assign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = bridge; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (chip->ports[i].bridge_dev == bridge) { + if (ds->ports[i].bridge_dev == br) { err = _mv88e6xxx_port_based_vlan_map(chip, i); if (err) break; @@ -2304,19 +2347,17 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; - struct net_device *bridge = chip->ports[port].bridge_dev; int i; mutex_lock(&chip->reg_lock); - /* Unassign the bridge and remap each port's VLANTable */ - chip->ports[port].bridge_dev = NULL; - + /* Remap each port's VLANTable */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - if (i == port || chip->ports[i].bridge_dev == bridge) + if (i == port || ds->ports[i].bridge_dev 
== br) if (_mv88e6xxx_port_based_vlan_map(chip, i)) netdev_warn(ds->ports[i].netdev, "failed to remap\n"); @@ -2542,7 +2583,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || - mv88e6xxx_6185_family(chip)) + mv88e6xxx_6185_family(chip) || mv88e6xxx_6341_family(chip)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { @@ -2596,7 +2637,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip)) { + mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) { /* Port ATU control: disable limiting the number of * address database entries that this port is allowed * to use. @@ -2820,7 +2861,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) int i; chip->ds = ds; - ds->slave_mii_bus = chip->mdio_bus; + ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); mutex_lock(&chip->reg_lock); @@ -2877,50 +2918,64 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; u16 val; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; + if (!chip->info->ops->phy_read) + return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_read(chip, phy, reg, &val); + err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mutex_unlock(&chip->reg_lock); + if (reg == MII_PHYSID2) { + /* Some internal PHYS don't have a model number. Use + * the mv88e6390 family model number instead. + */ + if (!(val & 0x3f0)) + val |= PORT_SWITCH_ID_PROD_NUM_6390; + } + return err ? 
err : val; } static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { - struct mv88e6xxx_chip *chip = bus->priv; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; int err; - if (phy >= mv88e6xxx_num_ports(chip)) - return 0xffff; + if (!chip->info->ops->phy_write) + return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_write(chip, phy, reg, val); + err = chip->info->ops->phy_write(chip, bus, phy, reg, val); mutex_unlock(&chip->reg_lock); return err; } static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, - struct device_node *np) + struct device_node *np, + bool external) { static int index; + struct mv88e6xxx_mdio_bus *mdio_bus; struct mii_bus *bus; int err; - if (np) - chip->mdio_np = of_get_child_by_name(np, "mdio"); - - bus = devm_mdiobus_alloc(chip->dev); + bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); if (!bus) return -ENOMEM; - bus->priv = (void *)chip; + mdio_bus = bus->priv; + mdio_bus->bus = bus; + mdio_bus->chip = chip; + INIT_LIST_HEAD(&mdio_bus->list); + mdio_bus->external = external; + if (np) { bus->name = np->full_name; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); @@ -2933,183 +2988,73 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, bus->write = mv88e6xxx_mdio_write; bus->parent = chip->dev; - if (chip->mdio_np) - err = of_mdiobus_register(bus, chip->mdio_np); + if (np) + err = of_mdiobus_register(bus, np); else err = mdiobus_register(bus); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); - goto out; + return err; } - chip->mdio_bus = bus; - - return 0; - -out: - if (chip->mdio_np) - of_node_put(chip->mdio_np); - - return err; -} - -static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) - -{ - struct mii_bus *bus = chip->mdio_bus; - - mdiobus_unregister(bus); - - if (chip->mdio_np) - of_node_put(chip->mdio_np); -} - -#ifdef CONFIG_NET_DSA_HWMON -static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6); - if (ret < 0) - goto error; - - /* Enable temperature sensor */ - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5)); - if (ret < 0) - goto error; - - /* Wait for temperature to stabilize */ - usleep_range(10000, 12000); - - ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); - if (ret < 0) - goto error; - - /* Disable temperature sensor */ - ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5)); - if (ret < 0) - goto error; - - *temp = ((val & 0x1f) - 5) * 5; - -error: - mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0); - mutex_unlock(&chip->reg_lock); - return ret; -} - -static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; - u16 val; - int ret; - - *temp = 0; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; - - *temp = (val & 0xff) - 25; + if (external) + list_add_tail(&mdio_bus->list, &chip->mdios); + else + list_add(&mdio_bus->list, &chip->mdios); return 0; } -static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) - return -EOPNOTSUPP; - - if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) - return mv88e63xx_get_temp(ds, temp); - - return mv88e61xx_get_temp(ds, temp); -} +static const struct of_device_id mv88e6xxx_mdio_external_match[] = { + { .compatible = "marvell,mv88e6xxx-mdio-external", + .data = (void *)true }, + { }, +}; -static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, + struct device_node *np) { - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *temp = 0; + const struct of_device_id *match; + struct device_node *child; + int err; - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; + /* Always register one mdio bus for the internal/default mdio + * bus. This maybe represented in the device tree, but is + * optional. + */ + child = of_get_child_by_name(np, "mdio"); + err = mv88e6xxx_mdio_register(chip, child, false); + if (err) + return err; - *temp = (((val >> 8) & 0x1f) * 5) - 25; + /* Walk the device tree, and see if there are any other nodes + * which say they are compatible with the external mdio + * bus. + */ + for_each_available_child_of_node(np, child) { + match = of_match_node(mv88e6xxx_mdio_external_match, child); + if (match) { + err = mv88e6xxx_mdio_register(chip, child, true); + if (err) + return err; + } + } return 0; } -static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - u16 val; - int err; +static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - if (err) - goto unlock; - temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - err = mv88e6xxx_phy_page_write(chip, phy, 6, 26, - (val & 0xe0ff) | (temp << 8)); -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - -static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { - struct mv88e6xxx_chip *chip = ds->priv; - int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; - u16 val; - int ret; - - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) - return -EOPNOTSUPP; - - *alarm = false; - - mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); - mutex_unlock(&chip->reg_lock); - if (ret < 0) - return ret; + struct mv88e6xxx_mdio_bus *mdio_bus; + struct mii_bus *bus; - *alarm = !!(val & 0x40); + list_for_each_entry(mdio_bus, &chip->mdios, list) { + bus = mdio_bus->bus; - return 0; + mdiobus_unregister(bus); + } } -#endif /* CONFIG_NET_DSA_HWMON */ static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { @@ -3232,8 +3177,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { static const struct mv88e6xxx_ops mv88e6123_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3279,8 +3224,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { static const struct mv88e6xxx_ops mv88e6161_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3304,8 +3249,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { static const struct mv88e6xxx_ops mv88e6165_ops = { /* MV88E6XXX_FAMILY_6165 */ .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_read, - .phy_write = mv88e6xxx_write, + .phy_read = mv88e6165_phy_read, + .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3452,6 +3397,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3477,6 +3424,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3502,6 +3451,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3555,6 +3506,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3711,8 +3664,66 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .reset = mv88e6352_g1_reset, }; 
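
Each chip variant in this table gets its own const mv88e6xxx_ops, and the core only calls a hook after checking it for NULL, falling back to -EOPNOTSUPP, as mv88e6xxx_phy_read()/mv88e6xxx_phy_write() do earlier in this file. Below is a minimal, self-contained userspace sketch of that dispatch convention; the fake_ and demo_ names are hypothetical stand-ins, not kernel symbols.

/*
 * Illustrative userspace sketch (not kernel code) of the ops-table
 * dispatch used by these per-chip tables: each variant fills in a
 * const struct of function pointers, and the core NULL-checks a hook
 * before calling it, falling back to -EOPNOTSUPP. The fake_ and demo_
 * names are hypothetical stand-ins, not kernel symbols.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct fake_chip;

struct fake_ops {
	/* optional hook: variants without EEPROM support leave it NULL */
	int (*get_eeprom)(struct fake_chip *chip, uint16_t addr, uint8_t *val);
};

struct fake_chip {
	const struct fake_ops *ops;
	uint8_t eeprom[256];	/* stand-in backing store */
};

static int demo_eeprom_read8(struct fake_chip *chip, uint16_t addr,
			     uint8_t *val)
{
	if (addr >= sizeof(chip->eeprom))
		return -ERANGE;
	*val = chip->eeprom[addr];
	return 0;
}

static const struct fake_ops demo_ops_with_eeprom = {
	.get_eeprom = demo_eeprom_read8,
};

static const struct fake_ops demo_ops_without_eeprom = {
	.get_eeprom = NULL,	/* deliberately absent */
};

/* mirrors the "if (!chip->info->ops->hook) return -EOPNOTSUPP" wrappers */
static int fake_get_eeprom(struct fake_chip *chip, uint16_t addr, uint8_t *val)
{
	if (!chip->ops->get_eeprom)
		return -EOPNOTSUPP;
	return chip->ops->get_eeprom(chip, addr, val);
}

int main(void)
{
	struct fake_chip a = { .ops = &demo_ops_with_eeprom };
	struct fake_chip b = { .ops = &demo_ops_without_eeprom };
	uint8_t val = 0;
	int rc;

	a.eeprom[3] = 0x5a;
	rc = fake_get_eeprom(&a, 3, &val);
	printf("chip a: rc=%d val=0x%02x\n", rc, val);
	rc = fake_get_eeprom(&b, 3, &val);
	printf("chip b: rc=%d\n", rc);	/* -EOPNOTSUPP */
	return 0;
}

This shape is why the 6390 and 6341 tables in these hunks gain 8-bit EEPROM support simply by filling in .get_eeprom/.set_eeprom with the g2 accessors, while chips that leave those hooks NULL keep reporting -EOPNOTSUPP.
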
+static const struct mv88e6xxx_ops mv88e6141_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + +static const struct mv88e6xxx_ops mv88e6341_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, +}; + static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3740,6 +3751,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3767,6 +3780,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { static const struct mv88e6xxx_ops mv88e6391_ops = { /* MV88E6XXX_FAMILY_6390 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, 
.phy_write = mv88e6xxx_g2_smi_phy_write, @@ -3996,7 +4011,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .port_base_addr = 0x0, .global1_addr = 0x1b, .tag_protocol = DSA_TAG_PROTO_DSA, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, @@ -4010,7 +4025,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4025,7 +4040,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4055,7 +4070,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4092,6 +4107,34 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .ops = &mv88e6321_ops, }, + [MV88E6141] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6141, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6141_ops, + }, + + [MV88E6341] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6341, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6341_ops, + }, + [MV88E6350] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, .family = MV88E6XXX_FAMILY_6351, @@ -4144,7 +4187,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4158,7 +4201,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_ports = 11, /* 10 + Z80 */ .port_base_addr = 0x0, .global1_addr = 0x1b, - .age_time_coeff = 15000, + .age_time_coeff = 3750, .g1_irqs = 9, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -4221,6 +4264,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) chip->dev = dev; mutex_init(&chip->reg_lock); + INIT_LIST_HEAD(&chip->mdios); return chip; } @@ -4240,10 +4284,6 @@ static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip) static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int sw_addr) { - /* ADDR[0] pin is unavailable externally and considered zero */ - if (sw_addr & 0x1) - return -EINVAL; - if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) @@ -4299,7 +4339,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, mv88e6xxx_phy_init(chip); - err = mv88e6xxx_mdio_register(chip, NULL); + err = mv88e6xxx_mdios_register(chip, NULL); if (err) goto free; @@ -4364,7 +4404,7 
@@ static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, return err; } -static struct dsa_switch_ops mv88e6xxx_switch_ops = { +static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, @@ -4375,12 +4415,6 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .set_eee = mv88e6xxx_set_eee, .get_eee = mv88e6xxx_get_eee, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, - .get_temp_limit = mv88e6xxx_get_temp_limit, - .set_temp_limit = mv88e6xxx_set_temp_limit, - .get_temp_alarm = mv88e6xxx_get_temp_alarm, -#endif .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, @@ -4406,23 +4440,25 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_dump = mv88e6xxx_port_mdb_dump, }; -static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, - struct device_node *np) +static struct dsa_switch_driver mv88e6xxx_switch_drv = { + .ops = &mv88e6xxx_switch_ops, +}; + +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) { struct device *dev = chip->dev; struct dsa_switch *ds; - ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + ds = dsa_switch_alloc(dev, DSA_MAX_PORTS); if (!ds) return -ENOMEM; - ds->dev = dev; ds->priv = chip; ds->ops = &mv88e6xxx_switch_ops; dev_set_drvdata(dev, ds); - return dsa_register_switch(ds, np); + return dsa_register_switch(ds, dev); } static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) @@ -4502,18 +4538,18 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) } } - err = mv88e6xxx_mdio_register(chip, np); + err = mv88e6xxx_mdios_register(chip, np); if (err) goto out_g2_irq; - err = mv88e6xxx_register_switch(chip, np); + err = mv88e6xxx_register_switch(chip); if (err) goto out_mdio; return 0; out_mdio: - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); out_g2_irq: if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); @@ -4534,7 +4570,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); - mv88e6xxx_mdio_unregister(chip); + mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) @@ -4568,7 +4604,7 @@ static struct mdio_driver mv88e6xxx_driver = { static int __init mv88e6xxx_init(void) { - register_switch_driver(&mv88e6xxx_switch_ops); + register_switch_driver(&mv88e6xxx_switch_drv); return mdio_driver_register(&mv88e6xxx_driver); } module_init(mv88e6xxx_init); @@ -4576,7 +4612,7 @@ module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { mdio_driver_unregister(&mv88e6xxx_driver); - unregister_switch_driver(&mv88e6xxx_switch_ops); + unregister_switch_driver(&mv88e6xxx_switch_drv); } module_exit(mv88e6xxx_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 3e77071949ab..353e26bea3c3 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) } /* Offset 0x14: EEPROM Command - * Offset 0x15: EEPROM Data + * Offset 0x15: EEPROM Data (for 16-bit data access) + * Offset 0x15: EEPROM Addr (for 8-bit data access) */ static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) @@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct 
mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_eeprom_wait(chip); } +static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip, + u16 addr, u8 *data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + err = mv88e6xxx_g2_eeprom_cmd(chip, cmd); + if (err) + return err; + + err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd); + if (err) + return err; + + *data = cmd & 0xff; + + return 0; +} + +static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip, + u16 addr, u8 data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data); +} + static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip, u8 addr, u16 *data) { @@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_eeprom_cmd(chip, cmd); } +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_read8(chip, offset, data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + int err; + + eeprom->len = 0; + + while (len) { + err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data); + if (err) + return err; + + eeprom->len++; + offset++; + data++; + len--; + } + + return 0; +} + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) { @@ -410,12 +501,17 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) return mv88e6xxx_g2_smi_phy_wait(chip); } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val) +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; @@ -427,12 +523,17 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val); } -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val) +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; int err; + if (mdio_bus->external) + cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL; + err = mv88e6xxx_g2_smi_phy_wait(chip); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 9aefb7d8b0ad..00e635279ba1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -23,15 +23,24 @@ static inline int mv88e6xxx_g2_require(struct 
mv88e6xxx_chip *chip) return 0; } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); +int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); +int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); + +int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); +int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); + int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -50,12 +59,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) } static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 *val) { return -EOPNOTSUPP; } static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, u16 val) { return -EOPNOTSUPP; @@ -67,6 +78,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + return -EOPNOTSUPP; +} + static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data) diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index af54baea47cf..9c5c0472b211 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -87,6 +87,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6131 0x106 #define PORT_SWITCH_ID_PROD_NUM_6320 0x115 #define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6141 0x340 #define PORT_SWITCH_ID_PROD_NUM_6161 0x161 #define PORT_SWITCH_ID_PROD_NUM_6165 0x165 #define PORT_SWITCH_ID_PROD_NUM_6171 0x171 @@ -100,6 +101,7 @@ #define PORT_SWITCH_ID_PROD_NUM_6240 0x240 #define PORT_SWITCH_ID_PROD_NUM_6290 0x290 #define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6341 0x341 #define PORT_SWITCH_ID_PROD_NUM_6352 0x352 #define PORT_SWITCH_ID_PROD_NUM_6350 0x371 #define PORT_SWITCH_ID_PROD_NUM_6351 0x375 @@ -382,10 +384,12 @@ #define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) #define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 +#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */ #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 #define GLOBAL2_SMI_PHY_CMD 0x18 #define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15) +#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13) #define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12) #define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \ GLOBAL2_SMI_PHY_CMD_MODE_22 | \ @@ -418,6 +422,7 @@ enum mv88e6xxx_model { MV88E6097, MV88E6123, MV88E6131, + MV88E6141, MV88E6161, MV88E6165, MV88E6171, @@ -432,6 +437,7 @@ enum mv88e6xxx_model { 
MV88E6290, MV88E6320, MV88E6321, + MV88E6341, MV88E6350, MV88E6351, MV88E6352, @@ -447,6 +453,7 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6341, /* 6141 6341 */ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ @@ -496,12 +503,6 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_STU, - /* Internal temperature sensor. - * Available from any enabled port's PHY register 26, page 6. - */ - MV88E6XXX_CAP_TEMP, - MV88E6XXX_CAP_TEMP_LIMIT, - /* VLAN Table Unit. * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. */ @@ -532,8 +533,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) #define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU) -#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP) -#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT) #define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU) /* Ingress Rate Limit unit */ @@ -566,6 +565,7 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ @@ -584,7 +584,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -603,13 +602,25 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) +#define MV88E6XXX_FLAGS_FAMILY_6341 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_G1_ATU_FID | \ + MV88E6XXX_FLAG_G1_VTU_FID | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_INT | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SERDES) + #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ @@ -619,7 +630,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -635,8 +645,6 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -649,8 +657,6 @@ struct mv88e6xxx_ops; (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_TEMP | \ - MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ @@ -689,10 +695,6 @@ struct mv88e6xxx_vtu_entry { struct mv88e6xxx_bus_ops; -struct mv88e6xxx_priv_port { - struct net_device *bridge_dev; -}; - struct mv88e6xxx_irq { u16 masked; struct irq_chip chip; @@ -733,8 +735,6 @@ struct mv88e6xxx_chip { */ struct mutex stats_mutex; - struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; - /* A switch may have a GPIO line tied to its reset pin. 
Parse * this from the device tree, and use it before performing * switch soft reset. @@ -744,11 +744,8 @@ struct mv88e6xxx_chip { /* set to size of eeprom if supported by the switch */ int eeprom_len; - /* Device node for the MDIO bus */ - struct device_node *mdio_np; - - /* And the MDIO bus itself */ - struct mii_bus *mdio_bus; + /* List of mdio busses */ + struct list_head mdios; /* There can be two interrupt controllers, which are chained * off a GPIO as interrupt source @@ -764,6 +761,13 @@ struct mv88e6xxx_bus_ops { int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); }; +struct mv88e6xxx_mdio_bus { + struct mii_bus *bus; + struct mv88e6xxx_chip *chip; + struct list_head list; + bool external; +}; + struct mv88e6xxx_ops { int (*get_eeprom)(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); @@ -772,10 +776,12 @@ struct mv88e6xxx_ops { int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr); - int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 *val); - int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 val); + int (*phy_read)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); + int (*phy_write)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 0db7fa0373ae..d380a93b092c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -193,7 +193,7 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port, ctrl = PORT_PCS_CTRL_SPEED_1000; break; case 2500: - ctrl = PORT_PCS_CTRL_SPEED_1000 | PORT_PCS_CTRL_ALTSPEED; + ctrl = PORT_PCS_CTRL_SPEED_10000 | PORT_PCS_CTRL_ALTSPEED; break; case 10000: /* all bits set, fall through... 
*/ diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b3df70d07ff6..a4fd4ccf7b67 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } static int -qca8k_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) +qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask = BIT(QCA8K_CPU_PORT); int i; - priv->port_sts[port].bridge_dev = bridge; - for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != bridge) + if (ds->ports[i].bridge_dev != br) continue; /* Add this port to the portvlan mask of the other ports * in the bridge @@ -775,14 +772,13 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, } static void -qca8k_port_bridge_leave(struct dsa_switch *ds, int port) +qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int i; for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (priv->port_sts[i].bridge_dev != - priv->port_sts[port].bridge_dev) + if (ds->ports[i].bridge_dev != br) continue; /* Remove this port to the portvlan mask of the other ports * in the bridge @@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port) QCA8K_PORT_LOOKUP_CTRL(i), BIT(port)); } - priv->port_sts[port].bridge_dev = NULL; + /* Set the cpu port to be the only one in the portvlan mask of * this port */ @@ -911,7 +907,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_QCA; } -static struct dsa_switch_ops qca8k_switch_ops = { +static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, .get_strings = qca8k_get_strings, @@ -954,17 +950,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); + priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); if (!priv->ds) return -ENOMEM; priv->ds->priv = priv; - priv->ds->dev = &mdiodev->dev; priv->ds->ops = &qca8k_switch_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); - return dsa_register_switch(priv->ds, priv->ds->dev->of_node); + return dsa_register_switch(priv->ds, &mdiodev->dev); } static void diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 201464719531..1ed4fac6cd6d 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -157,7 +157,6 @@ enum qca8k_fdb_cmd { struct ar8xxx_port_status { struct ethtool_eee eee; - struct net_device *bridge_dev; int enabled; }; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 6421835f11b7..2c80611b94ae 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -41,7 +41,48 @@ #define DRV_NAME "dummy" #define DRV_VERSION "1.0" +#undef pr_fmt +#define pr_fmt(fmt) DRV_NAME ": " fmt + static int numdummies = 1; +static int num_vfs; + +struct vf_data_storage { + u8 vf_mac[ETH_ALEN]; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + __be16 vlan_proto; + u16 min_tx_rate; + u16 max_tx_rate; + u8 spoofchk_enabled; + bool rss_query_enabled; + u8 trusted; + int link_state; +}; + +struct dummy_priv { + struct vf_data_storage *vfinfo; +}; + +static int dummy_num_vf(struct device *dev) +{ + return num_vfs; +} + +static struct bus_type dummy_bus = { + .name = "dummy", + .num_vf = dummy_num_vf, +}; + +static void release_dummy_parent(struct device *dev) +{ +} + +static struct device dummy_parent = { + .init_name = "dummy", + .bus = &dummy_bus, + .release = release_dummy_parent, +}; /* fake multicast ability */ static void set_multicast_list(struct net_device *dev) @@ -54,8 +95,8 @@ struct pcpu_dstats { struct u64_stats_sync syncp; }; -static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void dummy_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { int i; @@ -73,7 +114,6 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev, stats->tx_bytes += tbytes; stats->tx_packets += tpackets; } - return stats; } static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) @@ -91,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) static int dummy_dev_init(struct net_device *dev) { + struct dummy_priv *priv = netdev_priv(dev); + dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) return -ENOMEM; + priv->vfinfo = NULL; + + if (!num_vfs) + return 0; + + dev->dev.parent = &dummy_parent; + priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!priv->vfinfo) { + free_percpu(dev->dstats); + return -ENOMEM; + } + return 0; } @@ -112,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier) return 0; } +static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) + return -EINVAL; + + memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN); + + return 0; +} + +static int dummy_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + priv->vfinfo[vf].pf_vlan = vlan; + priv->vfinfo[vf].pf_qos = qos; + priv->vfinfo[vf].vlan_proto = vlan_proto; + + return 0; +} + +static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].min_tx_rate = min; + priv->vfinfo[vf].max_tx_rate = max; + + return 0; +} + +static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].spoofchk_enabled = val; + + return 0; +} + +static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].rss_query_enabled = val; + + return 0; +} + +static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].trusted = val; + + return 0; +} + +static int dummy_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if 
(vf >= num_vfs) + return -EINVAL; + + ivi->vf = vf; + memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN); + ivi->vlan = priv->vfinfo[vf].pf_vlan; + ivi->qos = priv->vfinfo[vf].pf_qos; + ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled; + ivi->linkstate = priv->vfinfo[vf].link_state; + ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate; + ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate; + ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled; + ivi->trusted = priv->vfinfo[vf].trusted; + ivi->vlan_proto = priv->vfinfo[vf].vlan_proto; + + return 0; +} + +static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state) +{ + struct dummy_priv *priv = netdev_priv(dev); + + if (vf >= num_vfs) + return -EINVAL; + + priv->vfinfo[vf].link_state = state; + + return 0; +} + static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, .ndo_uninit = dummy_dev_uninit, @@ -121,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = dummy_get_stats64, .ndo_change_carrier = dummy_change_carrier, + .ndo_set_vf_mac = dummy_set_vf_mac, + .ndo_set_vf_vlan = dummy_set_vf_vlan, + .ndo_set_vf_rate = dummy_set_vf_rate, + .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk, + .ndo_set_vf_trust = dummy_set_vf_trust, + .ndo_get_vf_config = dummy_get_vf_config, + .ndo_set_vf_link_state = dummy_set_vf_link_state, + .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en, }; static void dummy_get_drvinfo(struct net_device *dev, @@ -134,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = { .get_drvinfo = dummy_get_drvinfo, }; +static void dummy_free_netdev(struct net_device *dev) +{ + struct dummy_priv *priv = netdev_priv(dev); + + kfree(priv->vfinfo); + free_netdev(dev); +} + static void dummy_setup(struct net_device *dev) { ether_setup(dev); @@ -141,7 +323,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; - dev->destructor = free_netdev; + dev->destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. 
*/ dev->flags |= IFF_NOARP; @@ -172,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops dummy_link_ops __read_mostly = { .kind = DRV_NAME, + .priv_size = sizeof(struct dummy_priv), .setup = dummy_setup, .validate = dummy_validate, }; @@ -180,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = { module_param(numdummies, int, 0); MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); +module_param(num_vfs, int, 0); +MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device"); + static int __init dummy_init_one(void) { struct net_device *dev_dummy; int err; - dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup); + dev_dummy = alloc_netdev(sizeof(struct dummy_priv), + "dummy%d", NET_NAME_UNKNOWN, dummy_setup); if (!dev_dummy) return -ENOMEM; @@ -204,6 +391,21 @@ static int __init dummy_init_module(void) { int i, err = 0; + if (num_vfs) { + err = bus_register(&dummy_bus); + if (err < 0) { + pr_err("registering dummy bus failed\n"); + return err; + } + + err = device_register(&dummy_parent); + if (err < 0) { + pr_err("registering dummy parent device failed\n"); + bus_unregister(&dummy_bus); + return err; + } + } + rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); if (err < 0) @@ -219,12 +421,22 @@ static int __init dummy_init_module(void) out: rtnl_unlock(); + if (err && num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } + return err; } static void __exit dummy_cleanup_module(void) { rtnl_link_unregister(&dummy_link_ops); + + if (num_vfs) { + device_unregister(&dummy_parent); + bus_unregister(&dummy_bus); + } } module_init(dummy_init_module); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 9fe3990319ec..084a6d58543a 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); @@ -2370,9 +2370,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * 4) Get the hardware address. * 5) Put the card to sleep. 
*/ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err = typhoon_reset(ioaddr, WaitSleep); + if (err < 0) { err_msg = "could not reset 3XP"; - err = -EIO; goto error_out_dma; } @@ -2386,24 +2386,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) typhoon_init_interface(tp); typhoon_init_rings(tp); - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST); + if (err < 0) { err_msg = "cannot boot 3XP sleep image"; - err = -EIO; goto error_out_reset; } INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp); + if (err < 0) { err_msg = "cannot read MAC address"; - err = -EIO; goto error_out_reset; } *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - if(!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { err_msg = "Could not obtain valid ethernet address, aborting"; + err = -EIO; goto error_out_reset; } @@ -2411,7 +2412,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * later when we print out the version reported. */ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp); + if (err < 0) { err_msg = "Could not get Sleep Image version"; goto error_out_reset; } @@ -2428,9 +2430,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if(xp_resp[0].numDesc != 0) tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err = typhoon_sleep(tp, PCI_D3hot, 0); + if (err < 0) { err_msg = "cannot put adapter to sleep"; - err = -EIO; goto error_out_reset; } @@ -2453,7 +2455,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - if(register_netdev(dev) < 0) { + err = register_netdev(dev); + if (err < 0) { err_msg = "unable to register netdev"; goto error_out_reset; } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index e4c28fed61d5..8c08f9deef92 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" @@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 24330f4885a9..26dce5bf2c18 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ 
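
The typhoon_init_one() hunks above all make the same change: rather than testing a call's result inline and overwriting it with a blanket -EIO, the return value is captured and propagated, so callers see the real failure reason. A compact sketch of the before/after shape follows; demo_reset() and demo_probe() are hypothetical stand-ins, not driver functions.

/*
 * Sketch of the error-propagation cleanup applied to typhoon_init_one()
 * above: capture the callee's return value instead of collapsing every
 * failure to -EIO. demo_reset() and demo_probe() are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int demo_reset(int fail_with)
{
	return fail_with ? -fail_with : 0;
}

static int demo_probe(int fail_with)
{
	const char *err_msg;
	int err;

	/* old: if (demo_reset(...) < 0) { err = -EIO; goto error_out; } */
	err = demo_reset(fail_with);
	if (err < 0) {
		err_msg = "could not reset";
		goto error_out;
	}
	return 0;

error_out:
	fprintf(stderr, "%s (err %d)\n", err_msg, err);
	return err;
}

int main(void)
{
	printf("ok: %d\n", demo_probe(0));
	printf("fail: %d\n", demo_probe(ENOMEM));	/* -ENOMEM, not -EIO */
	return 0;
}
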
+obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ @@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ -obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 88164529b52a..a81731303730 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget) } if (i < budget) { - napi_complete(napi); + napi_complete_done(napi, i); if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) enable_irq(IRQ_MAC_RX); } diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 93def92f9997..9f7422ada704 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1008,7 +1008,7 @@ restart_txrx_poll: spin_unlock_irqrestore(&greth->devlock, flags); goto restart_txrx_poll; } else { - __napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irqrestore(&greth->devlock, flags); } } diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 831bab352f8e..87a11b9f0ea5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget) et131x_handle_send_pkts(adapter); if (work_done < budget) { - napi_complete(&adapter->napi); + napi_complete_done(&adapter->napi, work_done); et131x_enable_interrupts(adapter); } diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index b21d8aa8d653..15a8096c60df 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1471,8 +1471,8 @@ drop_skb: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *lst) +static void slic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *lst) { struct slic_device *sdev = netdev_priv(dev); struct slic_stats *stats = &sdev->stats; @@ -1489,8 +1489,6 @@ static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev, SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc); SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802); SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier); - - return lst; } static int slic_get_sset_count(struct net_device *dev, int sset) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 25864bff25ee..527908c7e384 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget) if (rxcomplete < budget) { - napi_complete(napi); + napi_complete_done(napi, rxcomplete); netdev_dbg(priv->dev, "NAPI Complete, did %d packets with budget %d\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index cc8b13ebfa75..aca95b397393 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ 
b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2165,19 +2165,19 @@ err: ena_com_delete_debug_area(adapter->ena_dev); } -static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ena_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_admin_basic_stats ena_stats; int rc; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) - return NULL; + return; rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); if (rc) - return NULL; + return; stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | ena_stats.tx_bytes_low; @@ -2204,8 +2204,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, stats->rx_errors = 0; stats->tx_errors = 0; - - return stats; } static const struct net_device_ops ena_netdev_ops = { diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9595f1bc535b..7b5df562f30f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) void __iomem *mmio = lp->mmio; struct sk_buff *skb,*new_skb; int min_pkt_len, status; - unsigned int intr0; int num_rx_pkt = 0; short pkt_len; #if AMD8111E_VLAN_TAG_USED short vtag; #endif - int rx_pkt_limit = budget; - unsigned long flags; - if (rx_pkt_limit <= 0) - goto rx_not_empty; + while (num_rx_pkt < budget) { + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); + if (status & OWN_BIT) + break; - do{ - /* process receive packets until we use the quota. - * If we own the next entry, it's a new packet. Send it up. + /* There is a tricky error noted by John Murphy, + * <murf@perftech.com> to Russ Nelson: Even with + * full-sized * buffers it's possible for a + * jabber packet to use two buffers, with only + * the last correctly noting the error. */ - while(1) { - status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); - if (status & OWN_BIT) - break; - - /* There is a tricky error noted by John Murphy, - * <murf@perftech.com> to Russ Nelson: Even with - * full-sized * buffers it's possible for a - * jabber packet to use two buffers, with only - * the last correctly noting the error. 
- */ - if(status & ERR_BIT) { - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - /* check for STP and ENP */ - if(!((status & STP_BIT) && (status & ENP_BIT))){ - /* resetting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; + if (status & ERR_BIT) { + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + /* check for STP and ENP */ + if (!((status & STP_BIT) && (status & ENP_BIT))){ + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; #if AMD8111E_VLAN_TAG_USED - vtag = status & TT_MASK; - /*MAC will strip vlan tag*/ - if (vtag != 0) - min_pkt_len =MIN_PKT_LEN - 4; + vtag = status & TT_MASK; + /* MAC will strip vlan tag */ + if (vtag != 0) + min_pkt_len = MIN_PKT_LEN - 4; else #endif - min_pkt_len =MIN_PKT_LEN; + min_pkt_len = MIN_PKT_LEN; - if (pkt_len < min_pkt_len) { - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } - if(--rx_pkt_limit < 0) - goto rx_not_empty; - new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); - if (!new_skb) { - /* if allocation fail, - * ignore that pkt and go to next one - */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } + if (pkt_len < min_pkt_len) { + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); + if (!new_skb) { + /* if allocation fail, + * ignore that pkt and go to next one + */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } - skb_reserve(new_skb, 2); - skb = lp->rx_skbuff[rx_index]; - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[rx_index] = new_skb; - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, - new_skb->data, - lp->rx_buff_len-2, - PCI_DMA_FROMDEVICE); + skb_reserve(new_skb, 2); + skb = lp->rx_skbuff[rx_index]; + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[rx_index] = new_skb; + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + new_skb->data, + lp->rx_buff_len-2, + PCI_DMA_FROMDEVICE); - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); #if AMD8111E_VLAN_TAG_USED - if (vtag == TT_VLAN_TAGGED){ - u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } -#endif - netif_receive_skb(skb); - /*COAL update rx coalescing parameters*/ - lp->coal_conf.rx_packets++; - lp->coal_conf.rx_bytes += pkt_len; - num_rx_pkt++; - - err_next_pkt: - lp->rx_ring[rx_index].buff_phy_addr - = cpu_to_le32(lp->rx_dma_addr[rx_index]); - lp->rx_ring[rx_index].buff_count = - cpu_to_le16(lp->rx_buff_len-2); - wmb(); - lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); - rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + if (vtag == TT_VLAN_TAGGED){ + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - /* Check the interrupt status register for more packets in the - * mean time. Process them since we have not used up our quota. 
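The rewritten amd8111e loop below drops exactly this INT0 re-read: receive work is now bounded by the NAPI budget alone, and the interrupt re-arm happens only when napi_complete_done() confirms polling has really finished, with the register writes kept under the driver spinlock (the pcnet32 hunk below gets the same treatment). A condensed sketch of that completion pattern, all example_* names hypothetical:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *lp = container_of(napi, struct example_priv,
						       napi);
		int work_done = example_clean_rx(lp, budget);	/* hypothetical */

		/* napi_complete_done() returns false if polling must continue
		 * (e.g. a busy-poll owner), so the IRQ stays masked in that case.
		 */
		if (work_done < budget && napi_complete_done(napi, work_done)) {
			unsigned long flags;

			spin_lock_irqsave(&lp->lock, flags);
			example_unmask_irq(lp);		/* hypothetical re-arm */
			spin_unlock_irqrestore(&lp->lock, flags);
		}
		return work_done;
	}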
- */ - intr0 = readl(mmio + INT0); - /*Ack receive packets */ - writel(intr0 & RINT0,mmio + INT0); +#endif + napi_gro_receive(napi, skb); + /* COAL update rx coalescing parameters */ + lp->coal_conf.rx_packets++; + lp->coal_conf.rx_bytes += pkt_len; + num_rx_pkt++; + +err_next_pkt: + lp->rx_ring[rx_index].buff_phy_addr + = cpu_to_le32(lp->rx_dma_addr[rx_index]); + lp->rx_ring[rx_index].buff_count = + cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + } - } while(intr0 & RINT0); + if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) { + unsigned long flags; - if (rx_pkt_limit > 0) { /* Receive descriptor is empty now */ spin_lock_irqsave(&lp->lock, flags); - __napi_complete(napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); } -rx_not_empty: return num_rx_pkt; } diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 41e58cca8fee..a8a22c0d688a 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1350,13 +1350,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) pcnet32_restart(dev, CSR0_START); netif_wake_queue(dev); } - spin_unlock_irqrestore(&lp->lock, flags); - - if (work_done < budget) { - spin_lock_irqsave(&lp->lock, flags); - - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { /* clear interrupt masks */ val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; @@ -1364,9 +1359,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) /* Set interrupt enable. */ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); - - spin_unlock_irqrestore(&lp->lock, flags); } + + spin_unlock_irqrestore(&lp->lock, flags); return work_done; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1c87cc204075..3aa457c8ca21 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1761,8 +1761,8 @@ static void xgbe_tx_timeout(struct net_device *netdev) schedule_work(&pdata->restart_work); } -static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *s) +static void xgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; @@ -1788,8 +1788,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, s->tx_dropped = netdev->stats.tx_dropped; DBGPR("<--%s\n", __func__); - - return s; } static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 523b8eff6d7b..d0d0d12b531f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget) processed = xgene_enet_process_ring(ring, budget); if (processed != budget) { - napi_complete(napi); + napi_complete_done(napi, processed); enable_irq(ring->irq); } @@ -1453,7 +1453,7 @@ err: return ret; } -static struct rtnl_link_stats64 *xgene_enet_get_stats64( +static void xgene_enet_get_stats64( struct net_device *ndev, struct rtnl_link_stats64 *storage) { @@ -1462,7 +1462,6 @@ static struct rtnl_link_stats64 
*xgene_enet_get_stats64(
 struct xgene_enet_desc_ring *ring;
 int i;

-	memset(stats, 0, sizeof(struct rtnl_link_stats64));
 	for (i = 0; i < pdata->txq_cnt; i++) {
 		ring = pdata->tx_ring[i];
 		if (ring) {
@@ -1484,8 +1483,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
 		}
 	}
 	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
-
-	return storage;
 }

 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1967,6 +1964,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
 	}
 }

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+	{ "APMC0D05", XGENE_ENET1},
+	{ "APMC0D30", XGENE_ENET1},
+	{ "APMC0D31", XGENE_ENET1},
+	{ "APMC0D3F", XGENE_ENET1},
+	{ "APMC0D26", XGENE_ENET2},
+	{ "APMC0D25", XGENE_ENET2},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
 static int xgene_enet_probe(struct platform_device *pdev)
 {
 	struct net_device *ndev;
@@ -2113,32 +2134,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
 	xgene_enet_remove(pdev);
 }

-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_enet_acpi_match[] = {
-	{ "APMC0D05", XGENE_ENET1},
-	{ "APMC0D30", XGENE_ENET1},
-	{ "APMC0D31", XGENE_ENET1},
-	{ "APMC0D3F", XGENE_ENET1},
-	{ "APMC0D26", XGENE_ENET2},
-	{ "APMC0D25", XGENE_ENET2},
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_enet_of_match[] = {
-	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
-	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
-	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
-#endif
-
 static struct platform_driver xgene_enet_driver = {
 	.driver = {
 		.name = "xgene-enet",
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
new file mode 100644
index 000000000000..cdf78e069a39
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -0,0 +1,24 @@
+#
+# aQuantia device configuration
+#
+
+config NET_VENDOR_AQUANTIA
+	bool "aQuantia devices"
+	default y
+	---help---
+	  Set this to y if you have an Ethernet network card that uses the aQuantia
+	  AQC107/AQC108 chipset.
+
+	  This option does not build any drivers; it causes the aQuantia
+	  drivers that can be built to appear in the list of Ethernet drivers.
+
+
+if NET_VENDOR_AQUANTIA
+
+config AQTION
+	tristate "aQuantia AQtion(tm) Support"
+	depends on PCI && X86_64
+	---help---
+	  This enables the support for the aQuantia AQtion(tm) Ethernet card.
+
+endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
new file mode 100644
index 000000000000..4f4897b689b2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the aQuantia device drivers.
+# + +obj-$(CONFIG_AQTION) += atlantic/ diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile new file mode 100644 index 000000000000..e4ae696920ef --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -0,0 +1,42 @@ +################################################################################ +# +# aQuantia Ethernet Controller AQtion Linux Driver +# Copyright(c) 2014-2017 aQuantia Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <http://www.gnu.org/licenses/>. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: <rdc-drv@aquantia.com> +# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA +# +################################################################################ + +# +# Makefile for the AQtion(tm) Ethernet driver +# + +obj-$(CONFIG_AQTION) += atlantic.o + +atlantic-objs := aq_main.o \ + aq_nic.o \ + aq_pci_func.o \ + aq_vec.o \ + aq_ring.o \ + aq_hw_utils.o \ + aq_ethtool.o \ + hw_atl/hw_atl_a0.o \ + hw_atl/hw_atl_b0.o \ + hw_atl/hw_atl_utils.o \ + hw_atl/hw_atl_llh.o diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h new file mode 100644 index 000000000000..5f99237a9d52 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -0,0 +1,77 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_cfg.h: Definition of configuration parameters and constants. 
*/
+#ifndef AQ_CFG_H
+#define AQ_CFG_H
+
+#define AQ_CFG_VECS_DEF 4U
+#define AQ_CFG_TCS_DEF 1U
+
+#define AQ_CFG_TXDS_DEF 4096U
+#define AQ_CFG_RXDS_DEF 1024U
+
+#define AQ_CFG_IS_POLLING_DEF 0U
+
+#define AQ_CFG_FORCE_LEGACY_INT 0U
+
+#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
+#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_IRQ_MASK 0x1FFU
+
+#define AQ_CFG_VECS_MAX 8U
+#define AQ_CFG_TCS_MAX 8U
+
+#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+
+/* LRO */
+#define AQ_CFG_IS_LRO_DEF 1U
+
+/* RSS */
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
+#define AQ_CFG_RSS_HASHKEY_SIZE 320U
+
+#define AQ_CFG_IS_RSS_DEF 1U
+#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
+#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
+
+#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
+#define AQ_CFG_PCI_FUNC_PORTS 2U
+
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
+
+#define AQ_CFG_SKB_FRAGS_MAX 32U
+
+#define AQ_CFG_NAPI_WEIGHT 64U
+
+#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
+
+/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+#define AQ_CFG_FC_MODE 3U
+
+#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
+
+#define AQ_CFG_IS_AUTONEG_DEF 1U
+#define AQ_CFG_MTU_DEF 1514U
+
+#define AQ_CFG_LOCK_TRYS 100U
+
+#define AQ_CFG_DRV_AUTHOR "aQuantia"
+#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_NAME "aquantia"
+#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
+	__stringify(NIC_MINOR_DRIVER_VERSION)"."\
+	__stringify(NIC_BUILD_DRIVER_VERSION)"."\
+	__stringify(NIC_REVISION_DRIVER_VERSION)
+
+#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
new file mode 100644
index 000000000000..9eb5e222a234
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -0,0 +1,23 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_common.h: Basic includes for all files in project. */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "ver.h"
+#include "aq_nic.h"
+#include "aq_cfg.h"
+#include "aq_utils.h"
+
+#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
new file mode 100644
index 000000000000..a761e91471df
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -0,0 +1,262 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.c: Definition of ethtool related functions.
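A note on the stats plumbing in aq_ethtool.c below: the name table is sized for the worst case of 22 device-wide counters plus 5 per-queue lines for each of AQ_CFG_VECS_MAX (8) queues, 62 strings in all, and aq_ethtool_get_sset_count() subtracts the lines belonging to unconfigured queues. The same arithmetic in isolation, with EXAMPLE_* constants mirroring (not exporting) the driver's values:

	#define EXAMPLE_GLOBAL_NAMES	22U
	#define EXAMPLE_QUEUE_LINES	5U	/* aq_ethtool_stat_queue_lines */
	#define EXAMPLE_MAX_QUEUES	8U	/* AQ_CFG_VECS_MAX */

	static unsigned int example_sset_count(unsigned int active_queues)
	{
		unsigned int total = EXAMPLE_GLOBAL_NAMES +
				     EXAMPLE_MAX_QUEUES * EXAMPLE_QUEUE_LINES;

		return total - (EXAMPLE_MAX_QUEUES - active_queues) *
			       EXAMPLE_QUEUE_LINES;
	}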
*/ + +#include "aq_ethtool.h" +#include "aq_nic.h" + +static void aq_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + memset(p, 0, regs_count * sizeof(u32)); + aq_nic_get_regs(aq_nic, regs, p); +} + +static int aq_ethtool_get_regs_len(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 regs_count = aq_nic_get_regs_count(aq_nic); + + return regs_count * sizeof(u32); +} + +static u32 aq_ethtool_get_link(struct net_device *ndev) +{ + return ethtool_op_get_link(ndev); +} + +static int aq_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + aq_nic_get_link_ksettings(aq_nic, cmd); + cmd->base.speed = netif_carrier_ok(ndev) ? + aq_nic_get_link_speed(aq_nic) : 0U; + + return 0; +} + +static int +aq_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic_set_link_ksettings(aq_nic, cmd); +} + +/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ +static const unsigned int aq_ethtool_stat_queue_lines = 5U; +static const unsigned int aq_ethtool_stat_queue_chars = + 5U * ETH_GSTRING_LEN; +static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { + "InPackets", + "InUCast", + "InMCast", + "InBCast", + "InErrors", + "OutPackets", + "OutUCast", + "OutMCast", + "OutBCast", + "InUCastOctects", + "OutUCastOctects", + "InMCastOctects", + "OutMCastOctects", + "InBCastOctects", + "OutBCastOctects", + "InOctects", + "OutOctects", + "InPacketsDma", + "OutPacketsDma", + "InOctetsDma", + "OutOctetsDma", + "InDroppedDma", + "Queue[0] InPackets", + "Queue[0] OutPackets", + "Queue[0] InJumboPackets", + "Queue[0] InLroPackets", + "Queue[0] InErrors", + "Queue[1] InPackets", + "Queue[1] OutPackets", + "Queue[1] InJumboPackets", + "Queue[1] InLroPackets", + "Queue[1] InErrors", + "Queue[2] InPackets", + "Queue[2] OutPackets", + "Queue[2] InJumboPackets", + "Queue[2] InLroPackets", + "Queue[2] InErrors", + "Queue[3] InPackets", + "Queue[3] OutPackets", + "Queue[3] InJumboPackets", + "Queue[3] InLroPackets", + "Queue[3] InErrors", + "Queue[4] InPackets", + "Queue[4] OutPackets", + "Queue[4] InJumboPackets", + "Queue[4] InLroPackets", + "Queue[4] InErrors", + "Queue[5] InPackets", + "Queue[5] OutPackets", + "Queue[5] InJumboPackets", + "Queue[5] InLroPackets", + "Queue[5] InErrors", + "Queue[6] InPackets", + "Queue[6] OutPackets", + "Queue[6] InJumboPackets", + "Queue[6] InLroPackets", + "Queue[6] InErrors", + "Queue[7] InPackets", + "Queue[7] OutPackets", + "Queue[7] InJumboPackets", + "Queue[7] InLroPackets", + "Queue[7] InErrors", +}; + +static void aq_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + +/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ + BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); + memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); + aq_nic_get_stats(aq_nic, data); +} + +static void aq_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + u32 firmware_version = aq_nic_get_fw_version(aq_nic); + u32 regs_count = 
aq_nic_get_regs_count(aq_nic); + + strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver)); + strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u", firmware_version >> 24, + (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU); + + strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = regs_count; + drvinfo->eedump_len = 0; +} + +static void aq_ethtool_get_strings(struct net_device *ndev, + u32 stringset, u8 *data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + if (stringset == ETH_SS_STATS) + memcpy(data, *aq_ethtool_stat_names, + sizeof(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_chars); +} + +static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ + int ret = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + switch (stringset) { + case ETH_SS_STATS: + ret = ARRAY_SIZE(aq_ethtool_stat_names) - + (AQ_CFG_VECS_MAX - cfg->vecs) * + aq_ethtool_stat_queue_lines; + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev) +{ + return AQ_CFG_RSS_INDIRECTION_TABLE_MAX; +} + +static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + return sizeof(cfg->aq_rss.hash_secret_key); +} + +static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + unsigned int i = 0U; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + if (indir) { + for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++) + indir[i] = cfg->aq_rss.indirection_table[i]; + } + if (key) + memcpy(key, cfg->aq_rss.hash_secret_key, + sizeof(cfg->aq_rss.hash_secret_key)); + return 0; +} + +static int aq_ethtool_get_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = cfg->vecs; + break; + + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +const struct ethtool_ops aq_ethtool_ops = { + .get_link = aq_ethtool_get_link, + .get_regs_len = aq_ethtool_get_regs_len, + .get_regs = aq_ethtool_get_regs, + .get_drvinfo = aq_ethtool_get_drvinfo, + .get_strings = aq_ethtool_get_strings, + .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_rxfh_key_size = aq_ethtool_get_rss_key_size, + .get_rxfh = aq_ethtool_get_rss, + .get_rxnfc = aq_ethtool_get_rxnfc, + .get_sset_count = aq_ethtool_get_sset_count, + .get_ethtool_stats = aq_ethtool_stats, + .get_link_ksettings = aq_ethtool_get_link_ksettings, + .set_link_ksettings = aq_ethtool_set_link_ksettings, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h new file mode 100644 index 000000000000..21c126eeb5eb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -0,0 +1,19 @@ +/* + * 
aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.h: Declaration of ethtool related functions. */
+
+#ifndef AQ_ETHTOOL_H
+#define AQ_ETHTOOL_H
+
+#include "aq_common.h"
+
+extern const struct ethtool_ops aq_ethtool_ops;
+
+#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
new file mode 100644
index 000000000000..fce0fd3f23ff
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
+ * functions.
+ */
+
+#ifndef AQ_HW_H
+#define AQ_HW_H
+
+#include "aq_common.h"
+
+/* NIC H/W capabilities */
+struct aq_hw_caps_s {
+	u64 hw_features;
+	u64 link_speed_msk;
+	unsigned int hw_priv_flags;
+	u32 rxds;
+	u32 txds;
+	u32 txhwb_alignment;
+	u32 irq_mask;
+	u32 vecs;
+	u32 mtu;
+	u32 mac_regs_count;
+	u8 ports;
+	u8 msix_irqs;
+	u8 tcs;
+	u8 rxd_alignment;
+	u8 rxd_size;
+	u8 txd_alignment;
+	u8 txd_size;
+	u8 tx_rings;
+	u8 rx_rings;
+	bool flow_control;
+	bool is_64_dma;
+	u32 fw_ver_expected;
+};
+
+struct aq_hw_link_status_s {
+	unsigned int mbps;
+};
+
+#define AQ_HW_IRQ_INVALID 0U
+#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_MSI 2U
+#define AQ_HW_IRQ_MSIX 3U
+
+#define AQ_HW_POWER_STATE_D0 0U
+#define AQ_HW_POWER_STATE_D3 3U
+
+#define AQ_HW_FLAG_STARTED 0x00000004U
+#define AQ_HW_FLAG_STOPPING 0x00000008U
+#define AQ_HW_FLAG_RESETTING 0x00000010U
+#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_LINK_DOWN 0x04000000U
+#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_HW_FLAG_ERR_HW 0x80000000U
+
+#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+
+struct aq_hw_s {
+	struct aq_obj_s header;
+	struct aq_nic_cfg_s *aq_nic_cfg;
+	struct aq_pci_func_s *aq_pci_func;
+	void __iomem *mmio;
+	unsigned int not_ff_addr;
+	struct aq_hw_link_status_s aq_link_status;
+};
+
+struct aq_ring_s;
+struct aq_ring_param_s;
+struct aq_nic_cfg_s;
+struct sk_buff;
+
+struct aq_hw_ops {
+	struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
+				  unsigned int port, struct aq_hw_ops *ops);
+
+	void (*destroy)(struct aq_hw_s *self);
+
+	int (*get_hw_caps)(struct aq_hw_s *self,
+			   struct aq_hw_caps_s *aq_hw_caps);
+
+	int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+			       unsigned int frags);
+
+	int (*hw_ring_rx_receive)(struct aq_hw_s *self,
+				  struct aq_ring_s *aq_ring);
+
+	int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+			       unsigned int sw_tail_old);
+
+	int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
+				      struct aq_ring_s *aq_ring);
+
+	int (*hw_get_mac_permanent)(struct aq_hw_s *self,
+				    struct aq_hw_caps_s *aq_hw_caps,
+				    u8 *mac);
+
+	int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+
+	int (*hw_get_link_status)(struct aq_hw_s *self,
+				  struct aq_hw_link_status_s *link_status);
+
+	int (*hw_set_link_speed)(struct aq_hw_s
*self, u32 speed); + + int (*hw_reset)(struct aq_hw_s *self); + + int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr); + + int (*hw_start)(struct aq_hw_s *self); + + int (*hw_stop)(struct aq_hw_s *self); + + int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_tx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_tx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_init)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param); + + int (*hw_ring_rx_start)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_ring_rx_stop)(struct aq_hw_s *self, + struct aq_ring_s *aq_ring); + + int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask); + + int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask); + + int (*hw_packet_filter_set)(struct aq_hw_s *self, + unsigned int packet_filter); + + int (*hw_multicast_list_set)(struct aq_hw_s *self, + u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count); + + int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, + bool itr_enabled); + + int (*hw_rss_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_rss_hash_set)(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + + int (*hw_get_regs)(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); + + int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, + unsigned int *p_count); + + int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); + + int (*hw_deinit)(struct aq_hw_s *self); + + int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); +}; + +#endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c new file mode 100644 index 000000000000..5f13465995f6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -0,0 +1,68 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.c: Definitions of helper functions used across + * hardware layer. 
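struct aq_hw_ops above is the seam between the generic aq_nic/aq_vec layers and the hw_atl/ silicon backends: the core code only ever reaches hardware through the table it was probed with, which is how the A0 and B0 chips share one driver. A sketch of a caller, reusing the real ops and field names from this patch inside a hypothetical wrapper:

	/* hypothetical helper; aq_nic_init()/aq_nic_start() do this for real */
	static int example_hw_bringup(struct aq_nic_s *self)
	{
		int err = self->aq_hw_ops.hw_reset(self->aq_hw);

		if (err < 0)
			return err;
		return self->aq_hw_ops.hw_start(self->aq_hw);
	}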
+ */ + +#include "aq_hw_utils.h" +#include "aq_hw.h" + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val) +{ + if (msk ^ ~0) { + u32 reg_old, reg_new; + + reg_old = aq_hw_read_reg(aq_hw, addr); + reg_new = (reg_old & (~msk)) | (val << shift); + + if (reg_old != reg_new) + aq_hw_write_reg(aq_hw, addr, reg_new); + } else { + aq_hw_write_reg(aq_hw, addr, val); + } +} + +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift) +{ + return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift); +} + +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) +{ + u32 value = readl(hw->mmio + reg); + + if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr)) + aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG); + + return value; +} + +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) +{ + writel(value, hw->mmio + reg); +} + +int aq_hw_err_from_flags(struct aq_hw_s *hw) +{ + int err = 0; + + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) { + err = -EIO; + goto err_exit; + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h new file mode 100644 index 000000000000..03b72ddbffb9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -0,0 +1,47 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_hw_utils.h: Declaration of helper functions used across hardware + * layer. + */ + +#ifndef AQ_HW_UTILS_H +#define AQ_HW_UTILS_H + +#include "aq_common.h" + +#ifndef HIDWORD +#define LODWORD(_qw) ((u32)(_qw)) +#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff)) +#endif + +#define AQ_HW_SLEEP(_US_) mdelay(_US_) + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ +do { \ + unsigned int AQ_HW_WAIT_FOR_i; \ + for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ + --AQ_HW_WAIT_FOR_i) {\ + udelay(_US_); \ + } \ + if (!AQ_HW_WAIT_FOR_i) {\ + err = -ETIME; \ + } \ +} while (0) + +struct aq_hw_s; + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val); +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +int aq_hw_err_from_flags(struct aq_hw_s *hw); + +#endif /* AQ_HW_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c new file mode 100644 index 000000000000..c17c70adef0d --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -0,0 +1,273 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.c: Main file for aQuantia Linux driver. 
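Two quirks in aq_hw_utils.h above are easy to trip over: AQ_HW_WAIT_FOR busy-waits with udelay() for at most _N_ iterations and, on timeout, assigns -ETIME to a local variable named err that it silently assumes is in scope; and AQ_HW_SLEEP's _US_ parameter is actually fed to mdelay(), i.e. milliseconds. Usage sketch for the wait macro; the register offset and ready bit are invented for illustration:

	static int example_wait_ready(struct aq_hw_s *hw)
	{
		int err = 0;	/* AQ_HW_WAIT_FOR writes -ETIME here on timeout */

		/* poll a hypothetical READY bit every 100 us, up to 1000 tries */
		AQ_HW_WAIT_FOR(aq_hw_read_reg(hw, 0x18U) & 0x1U, 100U, 1000U);
		return err;
	}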
*/ + +#include "aq_main.h" +#include "aq_nic.h" +#include "aq_pci_func.h" +#include "aq_ethtool.h" +#include "hw_atl/hw_atl_a0.h" +#include "hw_atl/hw_atl_b0.h" + +#include <linux/netdevice.h> +#include <linux/module.h> + +static const struct pci_device_id aq_pci_tbl[] = { + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), }, + { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), }, + {} +}; + +MODULE_DEVICE_TABLE(pci, aq_pci_tbl); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(AQ_CFG_DRV_VERSION); +MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); +MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); + +static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev) +{ + struct aq_hw_ops *ops = NULL; + + ops = hw_atl_a0_get_ops_by_id(pdev); + if (!ops) + ops = hw_atl_b0_get_ops_by_id(pdev); + + return ops; +} + +static int aq_ndev_open(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = NULL; + int err = 0; + + aq_nic = aq_nic_alloc_hot(ndev); + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + err = aq_nic_init(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) + aq_nic_deinit(aq_nic); + return err; +} + +static int aq_ndev_close(struct net_device *ndev) +{ + int err = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + err = aq_nic_stop(aq_nic); + if (err < 0) + goto err_exit; + aq_nic_deinit(aq_nic); + aq_nic_free_hot_resources(aq_nic); + +err_exit: + return err; +} + +static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_xmit(aq_nic, skb); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (new_mtu == ndev->mtu) { + err = 0; + goto err_exit; + } + if (new_mtu < 68) { + err = -EINVAL; + goto err_exit; + } + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); + if (err < 0) + goto err_exit; + ndev->mtu = new_mtu; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + +err_exit: + return err; +} + +static int aq_ndev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); + bool is_lro = false; + + if (aq_cfg->hw_features & NETIF_F_LRO) { + is_lro = features & NETIF_F_LRO; + + if (aq_cfg->is_lro != is_lro) { + aq_cfg->is_lro = is_lro; + + if (netif_running(ndev)) { + aq_ndev_close(ndev); + aq_ndev_open(ndev); + } + } + } + + return 0; +} + +static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = eth_mac_addr(ndev, addr); + if (err < 0) + goto err_exit; + err = aq_nic_set_mac(aq_nic, ndev); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void aq_ndev_set_multicast_settings(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + err = aq_nic_set_packet_filter(aq_nic, ndev->flags); + if (err < 0) + goto err_exit; + + if (netdev_mc_count(ndev)) { + err = aq_nic_set_multicast_list(aq_nic, ndev); + if (err < 0) + goto err_exit; + } + +err_exit:; +} + +static const struct net_device_ops aq_ndev_ops = { + .ndo_open = aq_ndev_open, + 
.ndo_stop = aq_ndev_close, + .ndo_start_xmit = aq_ndev_start_xmit, + .ndo_set_rx_mode = aq_ndev_set_multicast_settings, + .ndo_change_mtu = aq_ndev_change_mtu, + .ndo_set_mac_address = aq_ndev_set_mac_address, + .ndo_set_features = aq_ndev_set_features +}; + +static int aq_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct aq_hw_ops *aq_hw_ops = NULL; + struct aq_pci_func_s *aq_pci_func = NULL; + int err = 0; + + err = pci_enable_device(pdev); + if (err < 0) + goto err_exit; + aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev); + aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev, + &aq_ndev_ops, &aq_ethtool_ops); + if (!aq_pci_func) { + err = -ENOMEM; + goto err_exit; + } + err = aq_pci_func_init(aq_pci_func); + if (err < 0) + goto err_exit; + +err_exit: + if (err < 0) { + if (aq_pci_func) + aq_pci_func_free(aq_pci_func); + } + return err; +} + +static void aq_pci_remove(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + aq_pci_func_deinit(aq_pci_func); + aq_pci_func_free(aq_pci_func); +} + +static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static int aq_pci_resume(struct pci_dev *pdev) +{ + struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); + pm_message_t pm_msg = PMSG_RESTORE; + + return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); +} + +static struct pci_driver aq_pci_ops = { + .name = AQ_CFG_DRV_NAME, + .id_table = aq_pci_tbl, + .probe = aq_pci_probe, + .remove = aq_pci_remove, + .suspend = aq_pci_suspend, + .resume = aq_pci_resume, +}; + +static int __init aq_module_init(void) +{ + int err = 0; + + err = pci_register_driver(&aq_pci_ops); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static void __exit aq_module_exit(void) +{ + pci_unregister_driver(&aq_pci_ops); +} + +module_init(aq_module_init); +module_exit(aq_module_exit); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h new file mode 100644 index 000000000000..9748e7e575e0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -0,0 +1,17 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_main.h: Main file for aQuantia Linux driver. */ + +#ifndef AQ_MAIN_H +#define AQ_MAIN_H + +#include "aq_common.h" + +#endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c new file mode 100644 index 000000000000..bed25abd2889 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -0,0 +1,952 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.c: Definition of common code for NIC. 
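In aq_nic.c below, aq_nic_cfg_start() clamps the vector count to a power of two (an explicit 8/4/2/1 ladder) before RSS is enabled; aq_nic_rss_init() depends on that, since it builds the indirection table with i & (num_rss_queues - 1), which only spreads evenly when the queue count is a power of two. The same clamp written with the kernel's log2 helpers, as a hypothetical equivalent:

	#include <linux/log2.h>

	static unsigned int example_clamp_rss_vecs(unsigned int requested,
						   unsigned int hw_max)
	{
		unsigned int vecs = min(requested, hw_max);

		/* the 8/4/2/1 ladder == round down to a power of two, max 8 */
		return vecs ? rounddown_pow_of_two(min(vecs, 8U)) : 1U;
	}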
*/ + +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include "aq_pci_func.h" +#include "aq_nic_internal.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/timer.h> +#include <linux/cpu.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <net/ip.h> + +static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + struct aq_rss_parameters *rss_params = &cfg->aq_rss; + int i = 0; + + static u8 rss_key[40] = { + 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, + 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, + 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, + 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, + 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c + }; + + rss_params->hash_secret_key_size = sizeof(rss_key); + memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); + rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX; + + for (i = rss_params->indirection_table_size; i--;) + rss_params->indirection_table[i] = i & (num_rss_queues - 1); +} + +/* Fills aq_nic_cfg with valid defaults */ +static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + cfg->aq_hw_caps = &self->aq_hw_caps; + + cfg->vecs = AQ_CFG_VECS_DEF; + cfg->tcs = AQ_CFG_TCS_DEF; + + cfg->rxds = AQ_CFG_RXDS_DEF; + cfg->txds = AQ_CFG_TXDS_DEF; + + cfg->is_polling = AQ_CFG_IS_POLLING_DEF; + + cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; + cfg->itr = cfg->is_interrupt_moderation ? + AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; + + cfg->is_rss = AQ_CFG_IS_RSS_DEF; + cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; + cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; + cfg->flow_control = AQ_CFG_FC_MODE; + + cfg->mtu = AQ_CFG_MTU_DEF; + cfg->link_speed_msk = AQ_CFG_SPEED_MSK; + cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF; + + cfg->is_lro = AQ_CFG_IS_LRO_DEF; + + cfg->vlan_id = 0U; + + aq_nic_rss_init(self, cfg->num_rss_queues); +} + +/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ +int aq_nic_cfg_start(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + /*descriptors */ + cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); + cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); + + /*rss rings */ + cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); + cfg->vecs = min(cfg->vecs, num_online_cpus()); + /* cfg->vecs should be power of 2 for RSS */ + if (cfg->vecs >= 8U) + cfg->vecs = 8U; + else if (cfg->vecs >= 4U) + cfg->vecs = 4U; + else if (cfg->vecs >= 2U) + cfg->vecs = 2U; + else + cfg->vecs = 1U; + + cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); + + if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || + (self->aq_hw_caps.vecs == 1U) || + (cfg->vecs == 1U)) { + cfg->is_rss = 0U; + cfg->vecs = 1U; + } + + cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; + cfg->hw_features = self->aq_hw_caps.hw_features; + return 0; +} + +static void aq_nic_service_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct net_device *ndev = aq_nic_get_ndev(self); + int err = 0; + bool is_busy = false; + unsigned int i = 0U; + struct aq_hw_link_status_s link_status; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + atomic_inc(&self->header.busy_count); + is_busy = true; + if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) + goto err_exit; + + err = 
self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); + if (err < 0) + goto err_exit; + + self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + + if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { + if (link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + } else { + netif_carrier_off(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + } + + self->link_status = link_status; + } + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); + } + + ndev->stats.rx_packets = stats_rx.packets; + ndev->stats.rx_bytes = stats_rx.bytes; + ndev->stats.rx_errors = stats_rx.errors; + ndev->stats.tx_packets = stats_tx.packets; + ndev->stats.tx_bytes = stats_tx.bytes; + ndev->stats.tx_errors = stats_tx.errors; + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + mod_timer(&self->service_timer, + jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); +} + +static void aq_nic_polling_timer_cb(unsigned long param) +{ + struct aq_nic_s *self = (struct aq_nic_s *)param; + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_isr(i, (void *)aq_vec); + + mod_timer(&self->polling_timer, jiffies + + AQ_CFG_POLLING_TIMER_INTERVAL); +} + +static struct net_device *aq_nic_ndev_alloc(void) +{ + return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); +} + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops) +{ + struct net_device *ndev = NULL; + struct aq_nic_s *self = NULL; + int err = 0; + + ndev = aq_nic_ndev_alloc(); + self = netdev_priv(ndev); + if (!self) { + err = -EINVAL; + goto err_exit; + } + + ndev->netdev_ops = ndev_ops; + ndev->ethtool_ops = et_ops; + + SET_NETDEV_DEV(ndev, dev); + + ndev->if_port = port; + self->ndev = ndev; + + self->aq_pci_func = aq_pci_func; + + self->aq_hw_ops = *aq_hw_ops; + self->port = (u8)port; + + self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, + &self->aq_hw_ops); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + aq_nic_cfg_init_defaults(self); + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +int aq_nic_ndev_register(struct aq_nic_s *self) +{ + int err = 0; + unsigned int i = 0U; + + if (!self->ndev) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, + self->aq_nic_cfg.aq_hw_caps, + self->ndev->dev_addr); + if (err < 0) + goto err_exit; + +#if defined(AQ_CFG_MAC_ADDR_PERMANENT) + { + static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT; + + ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent); + } +#endif + err = register_netdev(self->ndev); + if (err < 0) + goto err_exit; + + self->is_ndev_registered = true; + netif_carrier_off(self->ndev); + + for (i = AQ_CFG_VECS_MAX; i--;) + aq_nic_ndev_queue_stop(self, i); + +err_exit: + return err; +} + +int aq_nic_ndev_init(struct aq_nic_s *self) +{ + struct 
aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; + struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg; + + self->ndev->hw_features |= aq_hw_caps->hw_features; + self->ndev->features = aq_hw_caps->hw_features; + self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; + self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; + + return 0; +} + +void aq_nic_ndev_free(struct aq_nic_s *self) +{ + if (!self->ndev) + goto err_exit; + + if (self->is_ndev_registered) + unregister_netdev(self->ndev); + + if (self->aq_hw) + self->aq_hw_ops.destroy(self->aq_hw); + + free_netdev(self->ndev); + +err_exit:; +} + +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) +{ + struct aq_nic_s *self = NULL; + int err = 0; + + if (!ndev) { + err = -EINVAL; + goto err_exit; + } + self = netdev_priv(ndev); + + if (!self) { + err = -EINVAL; + goto err_exit; + } + if (netif_running(ndev)) { + unsigned int i; + + for (i = AQ_CFG_VECS_MAX; i--;) + netif_stop_subqueue(ndev, i); + } + + for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; + self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = + aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); + if (!self->aq_vec[self->aq_vecs]) { + err = -ENOMEM; + goto err_exit; + } + } + +err_exit: + if (err < 0) { + aq_nic_free_hot_resources(self); + self = NULL; + } + return self; +} + +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring) +{ + self->aq_ring_tx[idx] = ring; +} + +struct device *aq_nic_get_dev(struct aq_nic_s *self) +{ + return self->ndev->dev.parent; +} + +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) +{ + return self->ndev; +} + +int aq_nic_init(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + self->power_state = AQ_HW_POWER_STATE_D0; + err = self->aq_hw_ops.hw_reset(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, + aq_nic_get_ndev(self)->dev_addr); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); + +err_exit: + return err; +} + +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) +{ + netif_start_subqueue(self->ndev, idx); +} + +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) +{ + netif_stop_subqueue(self->ndev, idx); +} + +int aq_nic_start(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + int err = 0; + unsigned int i = 0U; + + err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + self->packet_filter); + if (err < 0) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_vec_start(aq_vec); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_start(self->aq_hw); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + if (err < 0) + goto err_exit; + setup_timer(&self->service_timer, &aq_nic_service_timer_cb, + (unsigned long)self); + mod_timer(&self->service_timer, jiffies + + AQ_CFG_SERVICE_TIMER_INTERVAL); + + if (self->aq_nic_cfg.is_polling) { + setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb, + (unsigned long)self); + mod_timer(&self->polling_timer, jiffies + + 
AQ_CFG_POLLING_TIMER_INTERVAL); + } else { + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + err = aq_pci_func_alloc_irq(self->aq_pci_func, i, + self->ndev->name, aq_vec, + aq_vec_get_affinity_mask(aq_vec)); + if (err < 0) + goto err_exit; + } + + err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, + AQ_CFG_IRQ_MASK); + if (err < 0) + goto err_exit; + } + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_start(self, i); + + err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + + err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int frag_count = 0U; + + dx->flags = 0U; + dx->len = skb_headlen(skb); + dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len, + DMA_TO_DEVICE); + dx->len_pkt = skb->len; + dx->is_sop = 1U; + dx->is_mapped = 1U; + + ++ret; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; + dx->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; + dx->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; + } + + for (; nr_frags--; ++frag_count) { + unsigned int frag_len; + dma_addr_t frag_pa; + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; + + frag_len = skb_frag_size(frag); + + frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, + frag_len, DMA_TO_DEVICE); + + while (frag_len > AQ_CFG_TX_FRAME_MAX) { + ++dx; + ++ret; + dx->flags = 0U; + dx->len = AQ_CFG_TX_FRAME_MAX; + dx->pa = frag_pa; + dx->is_mapped = 1U; + + frag_len -= AQ_CFG_TX_FRAME_MAX; + frag_pa += AQ_CFG_TX_FRAME_MAX; + } + + ++dx; + ++ret; + + dx->flags = 0U; + dx->len = frag_len; + dx->pa = frag_pa; + dx->is_mapped = 1U; + } + + dx->is_eop = 1U; + dx->skb = skb; + + return ret; +} + +static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self, + struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + dx->flags = 0U; + dx->len_pkt = skb->len; + dx->len_l2 = ETH_HLEN; + dx->len_l3 = ip_hdrlen(skb); + dx->len_l4 = tcp_hdrlen(skb); + dx->mss = skb_shinfo(skb)->gso_size; + dx->is_txc = 1U; + return 1U; +} + +static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, + struct aq_ring_buff_s *dx) +{ + unsigned int ret = 0U; + + if (unlikely(skb_is_gso(skb))) { + ret = aq_nic_map_skb_lso(self, skb, dx); + ++dx; + } + + ret += aq_nic_map_skb_frag(self, skb, dx); + + return ret; +} + +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) +__releases(&ring->lock) +__acquires(&ring->lock) +{ + struct aq_ring_s *ring = NULL; + unsigned int frags = 0U; + unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; + unsigned int tc = 0U; + unsigned int trys = AQ_CFG_LOCK_TRYS; + int err = 0; + bool is_nic_in_bad_state; + bool is_busy = false; + struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX]; + + frags = skb_shinfo(skb)->nr_frags + 1; + + ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)]; + + atomic_inc(&self->header.busy_count); + is_busy = true; + + if (frags > AQ_CFG_SKB_FRAGS_MAX) { + dev_kfree_skb_any(skb); + goto err_exit; + } + + is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, + AQ_NIC_FLAGS_IS_NOT_TX_READY) || + (aq_ring_avail_dx(ring) < 
+ AQ_CFG_SKB_FRAGS_MAX); + + if (is_nic_in_bad_state) { + aq_nic_ndev_queue_stop(self, ring->idx); + err = NETDEV_TX_BUSY; + goto err_exit; + } + + do { + if (spin_trylock(&ring->header.lock)) { + frags = aq_nic_map_skb(self, skb, &buffers[0]); + + aq_ring_tx_append_buffs(ring, &buffers[0], frags); + + err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, + ring, frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < + AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop(self, ring->idx); + } + spin_unlock(&ring->header.lock); + + if (err >= 0) { + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + break; + } + } while (--trys); + + if (!trys) { + err = NETDEV_TX_BUSY; + goto err_exit; + } + +err_exit: + if (is_busy) + atomic_dec(&self->header.busy_count); + return err; +} + +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) +{ + int err = 0; + + err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); + if (err < 0) + goto err_exit; + + self->packet_filter = flags; + +err_exit: + return err; +} + +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) +{ + struct netdev_hw_addr *ha = NULL; + unsigned int i = 0U; + + self->mc_list.count = 0U; + + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], ha->addr); + ++self->mc_list.count; + } + + return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); +} + +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) +{ + int err = 0; + + if (new_mtu > self->aq_hw_caps.mtu) { + err = -EINVAL; + goto err_exit; + } + self->aq_nic_cfg.mtu = new_mtu; + +err_exit: + return err; +} + +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) +{ + return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); +} + +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) +{ + return self->link_status.mbps; +} + +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) +{ + u32 *regs_buff = p; + int err = 0; + + regs->version = 1; + + err = self->aq_hw_ops.hw_get_regs(self->aq_hw, + &self->aq_hw_caps, regs_buff); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int aq_nic_get_regs_count(struct aq_nic_s *self) +{ + return self->aq_hw_caps.mac_regs_count; +} + +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + unsigned int count = 0U; + int err = 0; + + err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); + if (err < 0) + goto err_exit; + + data += count; + count = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + data += count; + aq_vec_get_sw_stats(aq_vec, data, &count); + } + +err_exit:; + (void)err; +} + +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd) +{ + u32 supported, advertising; + + cmd->base.port = PORT_TP; + /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ? 
+ ADVERTISED_1000baseT_Full : 0U; + supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0; + supported |= SUPPORTED_Autoneg; + supported |= SUPPORTED_TP; + + advertising = (self->aq_nic_cfg.is_autoneg) ? + ADVERTISED_Autoneg : 0U; + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ? + ADVERTISED_10000baseT_Full : 0U; + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ? + ADVERTISED_1000baseT_Full : 0U; + + advertising |= + (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ? + ADVERTISED_100baseT_Full : 0U; + advertising |= (self->aq_nic_cfg.flow_control) ? + ADVERTISED_Pause : 0U; + advertising |= ADVERTISED_TP; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); +} + +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd) +{ + u32 speed = 0U; + u32 rate = 0U; + int err = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + rate = self->aq_hw_caps.link_speed_msk; + self->aq_nic_cfg.is_autoneg = true; + } else { + speed = cmd->base.speed; + + switch (speed) { + case SPEED_100: + rate = AQ_NIC_RATE_100M; + break; + + case SPEED_1000: + rate = AQ_NIC_RATE_1G; + break; + + case SPEED_2500: + rate = AQ_NIC_RATE_2GS; + break; + + case SPEED_5000: + rate = AQ_NIC_RATE_5G; + break; + + case SPEED_10000: + rate = AQ_NIC_RATE_10G; + break; + + default: + err = -1; + goto err_exit; + break; + } + if (!(self->aq_hw_caps.link_speed_msk & rate)) { + err = -1; + goto err_exit; + } + + self->aq_nic_cfg.is_autoneg = false; + } + + err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); + if (err < 0) + goto err_exit; + + self->aq_nic_cfg.link_speed_msk = rate; + +err_exit: + return err; +} + +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self) +{ + return &self->aq_nic_cfg; +} + +u32 aq_nic_get_fw_version(struct aq_nic_s *self) +{ + u32 fw_version = 0U; + + self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); + + return fw_version; +} + +int aq_nic_stop(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_nic_ndev_queue_stop(self, i); + + del_timer_sync(&self->service_timer); + + self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); + + if (self->aq_nic_cfg.is_polling) + del_timer_sync(&self->polling_timer); + else + aq_pci_func_free_irqs(self->aq_pci_func); + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_stop(aq_vec); + + return self->aq_hw_ops.hw_stop(self->aq_hw); +} + +void aq_nic_deinit(struct aq_nic_s *self) +{ + struct aq_vec_s *aq_vec = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, aq_vec = self->aq_vec[0]; + self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) + aq_vec_deinit(aq_vec); + + if (self->power_state == AQ_HW_POWER_STATE_D0) { + (void)self->aq_hw_ops.hw_deinit(self->aq_hw); + } else { + (void)self->aq_hw_ops.hw_set_power(self->aq_hw, + self->power_state); + } + +err_exit:; +} + +void aq_nic_free_hot_resources(struct aq_nic_s *self) +{ + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = AQ_DIMOF(self->aq_vec); i--;) { + if (self->aq_vec[i]) + aq_vec_free(self->aq_vec[i]); + } + +err_exit:; +} + +int 
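+ /* Suspend path (PM_EVENT_SLEEP or PM_EVENT_FREEZE): detach the
+ * netdev, stop the queues and the NIC, then deinit; any other event
+ * is treated as resume and reverses those steps. rtnl_lock()
+ * serializes this against concurrent ndo callbacks.
+ */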
aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) +{ + int err = 0; + + if (!netif_running(self->ndev)) { + err = 0; + goto err_exit; + } + rtnl_lock(); + if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { + self->power_state = AQ_HW_POWER_STATE_D3; + netif_device_detach(self->ndev); + netif_tx_stop_all_queues(self->ndev); + + err = aq_nic_stop(self); + if (err < 0) + goto err_exit; + + aq_nic_deinit(self); + } else { + err = aq_nic_init(self); + if (err < 0) + goto err_exit; + + err = aq_nic_start(self); + if (err < 0) + goto err_exit; + + netif_device_attach(self->ndev); + netif_tx_start_all_queues(self->ndev); + } + rtnl_unlock(); + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h new file mode 100644 index 000000000000..7fc2a5ecb2b7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -0,0 +1,110 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic.h: Declaration of common code for NIC. */ + +#ifndef AQ_NIC_H +#define AQ_NIC_H + +#include "aq_common.h" +#include "aq_rss.h" + +struct aq_ring_s; +struct aq_pci_func_s; +struct aq_hw_ops; + +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5GSR BIT(2) +#define AQ_NIC_RATE_2GS BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + +struct aq_nic_cfg_s { + struct aq_hw_caps_s *aq_hw_caps; + u64 hw_features; + u32 rxds; /* rx ring size, descriptors # */ + u32 txds; /* tx ring size, descriptors # */ + u32 vecs; /* vecs==allocated irqs */ + u32 irq_type; + u32 itr; + u32 num_rss_queues; + u32 mtu; + u32 ucp_0x364; + u32 flow_control; + u32 link_speed_msk; + u32 vlan_id; + u16 is_mc_list_enabled; + u16 mc_list_count; + bool is_autoneg; + bool is_interrupt_moderation; + bool is_polling; + bool is_rss; + bool is_lro; + u8 tcs; + struct aq_rss_parameters aq_rss; +}; + +#define AQ_NIC_FLAG_STARTED 0x00000004U +#define AQ_NIC_FLAG_STOPPING 0x00000008U +#define AQ_NIC_FLAG_RESETTING 0x00000010U +#define AQ_NIC_FLAG_CLOSING 0x00000020U +#define AQ_NIC_LINK_DOWN 0x04000000U +#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U +#define AQ_NIC_FLAG_ERR_HW 0x80000000U + +#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ + ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) + +struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, + const struct ethtool_ops *et_ops, + struct device *dev, + struct aq_pci_func_s *aq_pci_func, + unsigned int port, + const struct aq_hw_ops *aq_hw_ops); +int aq_nic_ndev_init(struct aq_nic_s *self); +struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev); +void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, + struct aq_ring_s *ring); +struct device *aq_nic_get_dev(struct aq_nic_s *self); +struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); +int aq_nic_init(struct aq_nic_s *self); +int aq_nic_cfg_start(struct aq_nic_s *self); +int aq_nic_ndev_register(struct aq_nic_s *self); +void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); +void aq_nic_ndev_queue_stop(struct aq_nic_s *self, 
unsigned int idx); +void aq_nic_ndev_free(struct aq_nic_s *self); +int aq_nic_start(struct aq_nic_s *self); +int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); +int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); +int aq_nic_get_regs_count(struct aq_nic_s *self); +void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); +int aq_nic_stop(struct aq_nic_s *self); +void aq_nic_deinit(struct aq_nic_s *self); +void aq_nic_free_hot_resources(struct aq_nic_s *self); +int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); +int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); +int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); +int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev); +unsigned int aq_nic_get_link_speed(struct aq_nic_s *self); +void aq_nic_get_link_ksettings(struct aq_nic_s *self, + struct ethtool_link_ksettings *cmd); +int aq_nic_set_link_ksettings(struct aq_nic_s *self, + const struct ethtool_link_ksettings *cmd); +struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); +u32 aq_nic_get_fw_version(struct aq_nic_s *self); +int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); + +#endif /* AQ_NIC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h new file mode 100644 index 000000000000..f81738a71c42 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h @@ -0,0 +1,46 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_nic_internal.h: Definition of private object structure. */ + +#ifndef AQ_NIC_INTERNAL_H +#define AQ_NIC_INTERNAL_H + +struct aq_nic_s { + struct aq_obj_s header; + struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_hw_s *aq_hw; + struct net_device *ndev; + struct aq_pci_func_s *aq_pci_func; + unsigned int aq_vecs; + unsigned int packet_filter; + unsigned int power_state; + bool is_ndev_registered; + u8 port; + struct aq_hw_ops aq_hw_ops; + struct aq_hw_caps_s aq_hw_caps; + struct aq_nic_cfg_s aq_nic_cfg; + struct timer_list service_timer; + struct timer_list polling_timer; + struct aq_hw_link_status_s link_status; + struct { + u32 count; + u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + } mc_list; +}; + +#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ + AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ + AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) + +#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ + AQ_NIC_LINK_DOWN) + +#endif /* AQ_NIC_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c new file mode 100644 index 000000000000..da4bc09dac51 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -0,0 +1,343 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +/* File aq_pci_func.c: Definition of PCI functions. */ + +#include "aq_pci_func.h" +#include "aq_nic.h" +#include "aq_vec.h" +#include "aq_hw.h" +#include <linux/interrupt.h> + +struct aq_pci_func_s { + struct pci_dev *pdev; + struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS]; + void __iomem *mmio; + void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; + resource_size_t mmio_pa; + unsigned int msix_entry_mask; + unsigned int irq_type; + unsigned int ports; + bool is_pci_enabled; + bool is_regions; + bool is_pci_using_dac; + struct aq_hw_caps_s aq_hw_caps; + struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS]; +}; + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops) +{ + struct aq_pci_func_s *self = NULL; + int err = 0; + unsigned int port = 0U; + + if (!aq_hw_ops) { + err = -EFAULT; + goto err_exit; + } + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + pci_set_drvdata(pdev, self); + self->pdev = pdev; + + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + if (err < 0) + goto err_exit; + + self->ports = self->aq_hw_caps.ports; + + for (port = 0; port < self->ports; ++port) { + struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, + &pdev->dev, self, + port, aq_hw_ops); + + if (!aq_nic) { + err = -ENOMEM; + goto err_exit; + } + self->port[port] = aq_nic; + } + +err_exit: + if (err < 0) { + if (self) + aq_pci_func_free(self); + self = NULL; + } + + (void)err; + return self; +} + +int aq_pci_func_init(struct aq_pci_func_s *self) +{ + int err = 0; + unsigned int bar = 0U; + unsigned int port = 0U; + unsigned int i = 0U; + + err = pci_enable_device(self->pdev); + if (err < 0) + goto err_exit; + + self->is_pci_enabled = true; + + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64)); + self->is_pci_using_dac = 1; + } + if (err) { + err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(self->pdev, + DMA_BIT_MASK(32)); + self->is_pci_using_dac = 0; + } + if (err != 0) { + err = -ENOSR; + goto err_exit; + } + + err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio"); + if (err < 0) + goto err_exit; + + self->is_regions = true; + + pci_set_master(self->pdev); + + for (bar = 0; bar < 4; ++bar) { + if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) { + resource_size_t reg_sz; + + self->mmio_pa = pci_resource_start(self->pdev, bar); + if (self->mmio_pa == 0U) { + err = -EIO; + goto err_exit; + } + + reg_sz = pci_resource_len(self->pdev, bar); + if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { + err = -EIO; + goto err_exit; + } + + self->mmio = ioremap_nocache(self->mmio_pa, reg_sz); + if (!self->mmio) { + err = -EIO; + goto err_exit; + } + break; + } + } + + for (i = 0; i < self->aq_hw_caps.msix_irqs; i++) + self->msix_entry[i].entry = i; + + /*enable interrupts */ +#if AQ_CFG_FORCE_LEGACY_INT + self->irq_type = AQ_HW_IRQ_LEGACY; +#else + err = pci_enable_msix(self->pdev, self->msix_entry, + self->aq_hw_caps.msix_irqs); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSIX; + } else { + err = pci_enable_msi(self->pdev); + + if (err >= 0) { + self->irq_type = AQ_HW_IRQ_MSI; + } else { + self->irq_type = AQ_HW_IRQ_LEGACY; + err = 0; + } + } +#endif + + /* net device init */ + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + err = 
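+ /* Per-port bring-up order: configuration start, then net_device
+ * init, then registration; any failure unwinds the whole PCI
+ * function through aq_pci_func_deinit() at err_exit.
+ */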
aq_nic_cfg_start(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_init(self->port[port]); + if (err < 0) + goto err_exit; + + err = aq_nic_ndev_register(self->port[port]); + if (err < 0) + goto err_exit; + } + +err_exit: + if (err < 0) + aq_pci_func_deinit(self); + return err; +} + +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, cpumask_t *affinity_mask) +{ + int err = 0; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0, + name, aq_vec); + break; + + case AQ_HW_IRQ_MSI: + err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec); + break; + + case AQ_HW_IRQ_LEGACY: + err = request_irq(self->pdev->irq, aq_vec_isr_legacy, + IRQF_SHARED, name, aq_vec); + break; + + default: + err = -EFAULT; + break; + } + + if (err >= 0) { + self->msix_entry_mask |= (1 << i); + self->aq_vec[i] = aq_vec; + + if (self->irq_type == AQ_HW_IRQ_MSIX) + irq_set_affinity_hint(self->msix_entry[i].vector, + affinity_mask); + } + + return err; +} + +void aq_pci_func_free_irqs(struct aq_pci_func_s *self) +{ + unsigned int i = 0U; + + for (i = 32U; i--;) { + if (!((1U << i) & self->msix_entry_mask)) + continue; + + switch (self->irq_type) { + case AQ_HW_IRQ_MSIX: + irq_set_affinity_hint(self->msix_entry[i].vector, NULL); + free_irq(self->msix_entry[i].vector, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_MSI: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + case AQ_HW_IRQ_LEGACY: + free_irq(self->pdev->irq, self->aq_vec[i]); + break; + + default: + break; + } + + self->msix_entry_mask &= ~(1U << i); + } +} + +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) +{ + return self->mmio; +} + +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) +{ + return self->irq_type; +} + +void aq_pci_func_deinit(struct aq_pci_func_s *self) +{ + if (!self) + goto err_exit; + + aq_pci_func_free_irqs(self); + + switch (self->irq_type) { + case AQ_HW_IRQ_MSI: + pci_disable_msi(self->pdev); + break; + + case AQ_HW_IRQ_MSIX: + pci_disable_msix(self->pdev); + break; + + case AQ_HW_IRQ_LEGACY: + break; + + default: + break; + } + + if (self->is_regions) + pci_release_regions(self->pdev); + + if (self->is_pci_enabled) + pci_disable_device(self->pdev); + +err_exit:; +} + +void aq_pci_func_free(struct aq_pci_func_s *self) +{ + unsigned int port = 0U; + + if (!self) + goto err_exit; + + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + aq_nic_ndev_free(self->port[port]); + } + + kfree(self); + +err_exit:; +} + +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg) +{ + int err = 0; + unsigned int port = 0U; + + if (!self) { + err = -EFAULT; + goto err_exit; + } + for (port = 0; port < self->ports; ++port) { + if (!self->port[port]) + continue; + + (void)aq_nic_change_pm_state(self->port[port], pm_msg); + } + +err_exit: + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h new file mode 100644 index 000000000000..ecb033791203 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_pci_func.h: Declaration of PCI functions. */ + +#ifndef AQ_PCI_FUNC_H +#define AQ_PCI_FUNC_H + +#include "aq_common.h" + +struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops, + struct pci_dev *pdev, + const struct net_device_ops *ndev_ops, + const struct ethtool_ops *eth_ops); +int aq_pci_func_init(struct aq_pci_func_s *self); +int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, + char *name, void *aq_vec, + cpumask_t *affinity_mask); +void aq_pci_func_free_irqs(struct aq_pci_func_s *self); +int aq_pci_func_start(struct aq_pci_func_s *self); +void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self); +unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self); +void aq_pci_func_deinit(struct aq_pci_func_s *self); +void aq_pci_func_free(struct aq_pci_func_s *self); +int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, + pm_message_t *pm_msg); + +#endif /* AQ_PCI_FUNC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c new file mode 100644 index 000000000000..dea9e9bbb8e7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -0,0 +1,375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.c: Definition of functions for Rx/Tx rings. 
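+ *
+ * Each ring keeps three indices: sw_tail (next slot software fills),
+ * sw_head (next slot to reclaim or process) and hw_head (last position
+ * reported by hardware). One slot is always kept unused so that
+ * sw_head == sw_tail unambiguously means "empty"; see
+ * aq_ring_avail_dx() in aq_ring.h.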
*/ + +#include "aq_ring.h" +#include "aq_nic.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic) +{ + int err = 0; + + self->buff_ring = + kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); + + if (!self->buff_ring) { + err = -ENOMEM; + goto err_exit; + } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); + if (!self->dx_ring) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->txds; + self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + self->aq_nic = aq_nic; + self->idx = idx; + self->size = aq_nic_cfg->rxds; + self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; + + self = aq_ring_alloc(self, aq_nic); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + +err_exit: + if (err < 0) { + aq_ring_free(self); + self = NULL; + } + return self; +} + +int aq_ring_init(struct aq_ring_s *self) +{ + self->hw_head = 0; + self->sw_head = 0; + self->sw_tail = 0; + return 0; +} + +void aq_ring_tx_append_buffs(struct aq_ring_s *self, + struct aq_ring_buff_s *buffer, + unsigned int buffers) +{ + if (likely(self->sw_tail + buffers < self->size)) { + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * buffers); + } else { + unsigned int first_part = self->size - self->sw_tail; + unsigned int second_part = buffers - first_part; + + memcpy(&self->buff_ring[self->sw_tail], buffer, + sizeof(buffer[0]) * first_part); + + memcpy(&self->buff_ring[0], &buffer[first_part], + sizeof(buffer[0]) * second_part); + } +} + +int aq_ring_tx_clean(struct aq_ring_s *self) +{ + struct device *dev = aq_nic_get_dev(self->aq_nic); + + for (; self->sw_head != self->hw_head; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) + dma_unmap_single(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } + + if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX) + aq_nic_ndev_queue_start(self->aq_nic, self->idx); + + return 0; +} + +static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, + unsigned int t) +{ + return (h < t) ? 
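+ /* i is inside the exclusive wrap-around interval (h, t): without
+ * wrapping (h < t) it must lie strictly between the two; with
+ * wrapping it must follow h or precede t. E.g. for h=6, t=2 on an
+ * 8-entry ring, i=7 and i=1 are inside while i=3 is not.
+ */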
((h < i) && (i < t)) : ((h < i) || (i < t)); +} + +#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +{ + struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); + int err = 0; + bool is_rsc_completed = true; + + for (; (self->sw_head != self->hw_head) && budget; + self->sw_head = aq_ring_next_dx(self, self->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + unsigned int i = 0U; + struct aq_ring_buff_s *buff_ = NULL; + + if (buff->is_error) { + __free_pages(buff->page, 0); + continue; + } + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + for (next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_]) { + is_rsc_completed = + aq_ring_dx_in_range(self->sw_head, + next_, + self->hw_head); + + if (unlikely(!is_rsc_completed)) { + is_rsc_completed = false; + break; + } + + if (buff_->is_eop) + break; + } + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + } + + /* for single fragment packets use build_skb() */ + if (buff->is_eop) { + skb = build_skb(page_address(buff->page), + buff->len + AQ_SKB_ALIGN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + + skb->dev = ndev; + skb_put(skb, buff->len); + } else { + skb = netdev_alloc_skb(ndev, ETH_HLEN); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_exit; + } + skb_put(skb, ETH_HLEN); + memcpy(skb->data, page_address(buff->page), ETH_HLEN); + + skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, + buff->len - ETH_HLEN, + SKB_TRUESIZE(buff->len - ETH_HLEN)); + + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; true; + next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } + } + + skb->protocol = eth_type_trans(skb, ndev); + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + __skb_mark_checksum_bad(skb); + } else { + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? 
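+ /* The hardware-computed RSS hash is handed to the stack; it is
+ * claimed as an L4 hash only when the descriptor reported an
+ * L4-based rss_type (decoded in the hw layer, e.g. hw_atl_a0.c).
+ */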
PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + + skb_record_rx_queue(skb, self->idx); + + netif_receive_skb(skb); + + ++self->stats.rx.packets; + self->stats.rx.bytes += skb->len; + } + +err_exit: + return err; +} + +int aq_ring_rx_fill(struct aq_ring_s *self) +{ + struct aq_ring_buff_s *buff = NULL; + int err = 0; + int i = 0; + + for (i = aq_ring_avail_dx(self); i--; + self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { + buff = &self->buff_ring[self->sw_tail]; + + buff->flags = 0U; + buff->len = AQ_CFG_RX_FRAME_MAX; + + buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, 0); + if (!buff->page) { + err = -ENOMEM; + goto err_exit; + } + + buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), + buff->page, 0, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { + err = -ENOMEM; + goto err_exit; + } + + buff = NULL; + } + +err_exit: + if (err < 0) { + if (buff && buff->page) + __free_pages(buff->page, 0); + } + + return err; +} + +void aq_ring_rx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + + dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, + AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); + + __free_pages(buff->page, 0); + } + +err_exit:; +} + +void aq_ring_tx_deinit(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + for (; self->sw_head != self->sw_tail; + self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct device *ndev = aq_nic_get_dev(self->aq_nic); + + if (likely(buff->is_mapped)) { + if (unlikely(buff->is_sop)) { + dma_unmap_single(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ndev, buff->pa, buff->len, + DMA_TO_DEVICE); + } + } + + if (unlikely(buff->is_eop)) + dev_kfree_skb_any(buff->skb); + } +err_exit:; +} + +void aq_ring_free(struct aq_ring_s *self) +{ + if (!self) + goto err_exit; + + kfree(self->buff_ring); + + if (self->dx_ring) + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size, self->dx_ring, + self->dx_ring_pa); + +err_exit:; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h new file mode 100644 index 000000000000..0ac3f9e7bee6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -0,0 +1,157 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */ + +#ifndef AQ_RING_H +#define AQ_RING_H + +#include "aq_common.h" + +struct page; + +/* TxC SOP DX EOP + * +----------+----------+----------+----------- + * 8bytes|len l3,l4 | pa | pa | pa + * +----------+----------+----------+----------- + * 4/8bytes|len pkt |len pkt | | skb + * +----------+----------+----------+----------- + * 4/8bytes|is_txc |len,flags |len |len,is_eop + * +----------+----------+----------+----------- + * + * This aq_ring_buff_s doesn't have endianness dependency. + * It is __packed for cache line optimizations. 
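+ *
+ * The four descriptor roles above share one union: TxC carries LSO
+ * context (MSS plus l2/l3/l4 header lengths), SOP opens a packet,
+ * DX maps an intermediate fragment, and EOP closes the packet and
+ * owns the skb pointer used at completion time.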
+ */ +struct __packed aq_ring_buff_s { + union { + /* RX */ + struct { + u32 rss_hash; + u16 next; + u8 is_hash_l4; + u8 rsvd1; + struct page *page; + }; + /* EOP */ + struct { + dma_addr_t pa_eop; + struct sk_buff *skb; + }; + /* DX */ + struct { + dma_addr_t pa; + }; + /* SOP */ + struct { + dma_addr_t pa_sop; + u32 len_pkt_sop; + }; + /* TxC */ + struct { + u32 mss; + u8 len_l2; + u8 len_l3; + u8 len_l4; + u8 rsvd2; + u32 len_pkt; + }; + }; + union { + struct { + u32 len:16; + u32 is_ip_cso:1; + u32 is_udp_cso:1; + u32 is_tcp_cso:1; + u32 is_cso_err:1; + u32 is_sop:1; + u32 is_eop:1; + u32 is_txc:1; + u32 is_mapped:1; + u32 is_cleaned:1; + u32 is_error:1; + u32 rsvd3:6; + }; + u32 flags; + }; +}; + +struct aq_ring_stats_rx_s { + u64 errors; + u64 packets; + u64 bytes; + u64 lro_packets; + u64 jumbo_packets; +}; + +struct aq_ring_stats_tx_s { + u64 errors; + u64 packets; + u64 bytes; +}; + +union aq_ring_stats_s { + struct aq_ring_stats_rx_s rx; + struct aq_ring_stats_tx_s tx; +}; + +struct aq_ring_s { + struct aq_obj_s header; + struct aq_ring_buff_s *buff_ring; + u8 *dx_ring; /* descriptors ring, dma shared mem */ + struct aq_nic_s *aq_nic; + unsigned int idx; /* for HW layer registers operations */ + unsigned int hw_head; + unsigned int sw_head; + unsigned int sw_tail; + unsigned int size; /* descriptors number */ + unsigned int dx_size; /* TX or RX descriptor size, */ + /* stored here for faster math */ + union aq_ring_stats_s stats; + dma_addr_t dx_ring_pa; +}; + +struct aq_ring_param_s { + unsigned int vec_idx; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, + unsigned int dx) +{ + return (++dx >= self->size) ? 0U : dx; +} + +static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self) +{ + return (((self->sw_tail >= self->sw_head)) ? + (self->size - 1) - self->sw_tail + self->sw_head : + self->sw_head - self->sw_tail - 1); +} + +struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_init(struct aq_ring_s *self); +void aq_ring_tx_deinit(struct aq_ring_s *self); +void aq_ring_rx_deinit(struct aq_ring_s *self); +void aq_ring_free(struct aq_ring_s *self); +void aq_ring_tx_append_buffs(struct aq_ring_s *ring, + struct aq_ring_buff_s *buffer, + unsigned int buffers); +int aq_ring_tx_clean(struct aq_ring_s *self); +int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_fill(struct aq_ring_s *self); + +#endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h new file mode 100644 index 000000000000..1db6eb20a8f2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h @@ -0,0 +1,26 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_rss.h: Receive Side Scaling definitions.
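+ *
+ * hash_secret_key is the secret key programmed into the RSS hash unit
+ * (presumably a Toeplitz-style hash) and indirection_table maps hash
+ * results onto RSS queues; both are bounded by the AQ_CFG_RSS_xxx
+ * limits from aq_cfg.h.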
*/ + +#ifndef AQ_RSS_H +#define AQ_RSS_H + +#include "aq_common.h" +#include "aq_cfg.h" + +struct aq_rss_parameters { + u16 base_cpu_number; + u16 indirection_table_size; + u16 hash_secret_key_size; + u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)]; + u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX]; +}; + +#endif /* AQ_RSS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h new file mode 100644 index 000000000000..4446bd90fd86 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -0,0 +1,50 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_utils.h: Useful macros and structures used in all layers of the driver. */ + +#ifndef AQ_UTILS_H +#define AQ_UTILS_H + +#include "aq_common.h" + +#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) + +struct aq_obj_s { + spinlock_t lock; /* spinlock for nic/rings processing */ + atomic_t flags; + atomic_t busy_count; +}; + +static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old | (mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask) +{ + unsigned long flags_old, flags_new; + + do { + flags_old = atomic_read(flags); + flags_new = flags_old & ~(mask); + } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old); +} + +static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask) +{ + return atomic_read(flags) & mask; +} + +#endif /* AQ_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c new file mode 100644 index 000000000000..cb30a6396a70 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -0,0 +1,392 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings. + * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
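+ *
+ * An aq_vec_s is the per-interrupt unit of work: one NAPI context plus
+ * a Tx/Rx ring pair for every traffic class, all serviced together
+ * from aq_vec_poll().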
+ */ + +#include "aq_vec.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + +#include <linux/netdevice.h> + +struct aq_vec_s { + struct aq_obj_s header; + struct aq_hw_ops *aq_hw_ops; + struct aq_hw_s *aq_hw; + struct aq_nic_s *aq_nic; + unsigned int tx_rings; + unsigned int rx_rings; + struct aq_ring_param_s aq_ring_param; + struct napi_struct napi; + struct aq_ring_s ring[AQ_CFG_TCS_MAX][2]; +}; + +#define AQ_VEC_TX_ID 0 +#define AQ_VEC_RX_ID 1 + +static int aq_vec_poll(struct napi_struct *napi, int budget) +__releases(&self->lock) +__acquires(&self->lock) +{ + struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); + struct aq_ring_s *ring = NULL; + int work_done = 0; + int err = 0; + unsigned int i = 0U; + unsigned int sw_tail_old = 0U; + bool was_tx_cleaned = false; + + if (!self) { + err = -EINVAL; + } else if (spin_trylock(&self->header.lock)) { + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + if (self->aq_hw_ops->hw_ring_tx_head_update) { + err = self->aq_hw_ops->hw_ring_tx_head_update( + self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + } + + if (ring[AQ_VEC_TX_ID].sw_head != + ring[AQ_VEC_TX_ID].hw_head) { + err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + was_tx_cleaned = true; + } + + err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + if (ring[AQ_VEC_RX_ID].sw_head != + ring[AQ_VEC_RX_ID].hw_head) { + err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + &work_done, + budget - work_done); + if (err < 0) + goto err_exit; + + sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill( + self->aq_hw, + &ring[AQ_VEC_RX_ID], sw_tail_old); + if (err < 0) + goto err_exit; + } + } + + if (was_tx_cleaned) + work_done = budget; + + if (work_done < budget) { + napi_complete_done(napi, work_done); + self->aq_hw_ops->hw_irq_enable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + } + +err_exit: + spin_unlock(&self->header.lock); + } + + return work_done; +} + +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + struct aq_vec_s *self = NULL; + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + err = -ENOMEM; + goto err_exit; + } + + self->aq_nic = aq_nic; + self->aq_ring_param.vec_idx = idx; + self->aq_ring_param.cpu = + idx + aq_nic_cfg->aq_rss.base_cpu_number; + + cpumask_set_cpu(self->aq_ring_param.cpu, + &self->aq_ring_param.affinity_mask); + + self->tx_rings = 0; + self->rx_rings = 0; + + netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, + aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + + for (i = 0; i < aq_nic_cfg->tcs; ++i) { + unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic, + self->tx_rings, + self->aq_ring_param.vec_idx); + + ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->tx_rings; + + aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, + idx_ring, aq_nic_cfg); + if (!ring) { + err = -ENOMEM; + goto err_exit; + } + + ++self->rx_rings; + } + +err_exit: + if (err < 0) { + aq_vec_free(self); + self = NULL; + } + return self; +} + +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct 
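+ /* Wires the vector to the hardware ops: for each TC the software
+ * ring state is initialized, the hardware ring registers are
+ * programmed, and the Rx ring is pre-filled before traffic starts.
+ */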
aq_hw_s *aq_hw) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + self->aq_hw_ops = aq_hw_ops; + self->aq_hw = aq_hw; + + spin_lock_init(&self->header.lock); + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = aq_ring_init(&ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw, + &ring[AQ_VEC_TX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_init(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw, + &ring[AQ_VEC_RX_ID], + &self->aq_ring_param); + if (err < 0) + goto err_exit; + + err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw, + &ring[AQ_VEC_RX_ID], 0U); + if (err < 0) + goto err_exit; + } + +err_exit: + return err; +} + +int aq_vec_start(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + int err = 0; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + if (err < 0) + goto err_exit; + + err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + if (err < 0) + goto err_exit; + } + + napi_enable(&self->napi); + +err_exit: + return err; +} + +void aq_vec_stop(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, + &ring[AQ_VEC_TX_ID]); + + self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw, + &ring[AQ_VEC_RX_ID]); + } + + napi_disable(&self->napi); +} + +void aq_vec_deinit(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]); + aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); + } +err_exit:; +} + +void aq_vec_free(struct aq_vec_s *self) +{ + struct aq_ring_s *ring = NULL; + unsigned int i = 0U; + + if (!self) + goto err_exit; + + for (i = 0U, ring = self->ring[0]; + self->tx_rings > i; ++i, ring = self->ring[i]) { + aq_ring_free(&ring[AQ_VEC_TX_ID]); + aq_ring_free(&ring[AQ_VEC_RX_ID]); + } + + netif_napi_del(&self->napi); + + kfree(self); + +err_exit:; +} + +irqreturn_t aq_vec_isr(int irq, void *private) +{ + struct aq_vec_s *self = private; + int err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + napi_schedule(&self->napi); + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +irqreturn_t aq_vec_isr_legacy(int irq, void *private) +{ + struct aq_vec_s *self = private; + u64 irq_mask = 0U; + irqreturn_t err = 0; + + if (!self) { + err = -EINVAL; + goto err_exit; + } + err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask); + if (err < 0) + goto err_exit; + + if (irq_mask) { + self->aq_hw_ops->hw_irq_disable(self->aq_hw, + 1U << self->aq_ring_param.vec_idx); + napi_schedule(&self->napi); + } else { + self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U); + err = IRQ_NONE; + } + +err_exit: + return err >= 0 ? 
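+ /* err holds a negative errno on failure despite the irqreturn_t
+ * type; only its sign is used to pick IRQ_HANDLED vs IRQ_NONE.
+ */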
IRQ_HANDLED : IRQ_NONE; +} + +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) +{ + return &self->aq_ring_param.affinity_mask; +} + +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx) +{ + struct aq_ring_s *ring = NULL; + unsigned int r = 0U; + + for (r = 0U, ring = self->ring[0]; + self->tx_rings > r; ++r, ring = self->ring[r]) { + struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; + struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; + + stats_rx->packets += rx->packets; + stats_rx->bytes += rx->bytes; + stats_rx->errors += rx->errors; + stats_rx->jumbo_packets += rx->jumbo_packets; + stats_rx->lro_packets += rx->lro_packets; + + stats_tx->packets += tx->packets; + stats_tx->bytes += tx->bytes; + stats_tx->errors += tx->errors; + } +} + +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) +{ + unsigned int count = 0U; + struct aq_ring_stats_rx_s stats_rx; + struct aq_ring_stats_tx_s stats_tx; + + memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); + memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + aq_vec_add_stats(self, &stats_rx, &stats_tx); + + data[count] += stats_rx.packets; + data[++count] += stats_tx.packets; + data[++count] += stats_rx.jumbo_packets; + data[++count] += stats_rx.lro_packets; + data[++count] += stats_rx.errors; + + if (p_count) + *p_count = ++count; + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h new file mode 100644 index 000000000000..6c68b184236c --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -0,0 +1,42 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings. + * Declaration of functions for Rx and Tx rings. + */ + +#ifndef AQ_VEC_H +#define AQ_VEC_H + +#include "aq_common.h" +#include <linux/irqreturn.h> + +struct aq_hw_s; +struct aq_hw_ops; +struct aq_ring_stats_rx_s; +struct aq_ring_stats_tx_s; + +irqreturn_t aq_vec_isr(int irq, void *private); +irqreturn_t aq_vec_isr_legacy(int irq, void *private); +struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, + struct aq_hw_s *aq_hw); +void aq_vec_deinit(struct aq_vec_s *self); +void aq_vec_free(struct aq_vec_s *self); +int aq_vec_start(struct aq_vec_s *self); +void aq_vec_stop(struct aq_vec_s *self); +cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); +int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, + unsigned int *p_count); +void aq_vec_add_stats(struct aq_vec_s *self, + struct aq_ring_stats_rx_s *stats_rx, + struct aq_ring_stats_tx_s *stats_tx); + +#endif /* AQ_VEC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c new file mode 100644 index 000000000000..1f388054a6c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -0,0 +1,905 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_a0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_a0_internal.h" + +static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_a0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_a0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_A0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_A0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 
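+ /* The buffer thresholds above are apparently in 32-byte units
+ * (buff_size is in KiB, hence the 1024/32 factor): high watermark
+ * at 66%, low at 50% of the TC buffer. XOFF pause is enabled per
+ * TC only when Rx flow control is configured.
+ */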
1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX * + HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? + 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_A0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 
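+ /* Only unicast filter 0 (the primary MAC address slot) is enabled
+ * at init; the remaining filters stay disabled until configured.
+ */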
1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + rpf_vlan_prom_mode_en_set(self, 1); + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + hw_atl_a0_hw_init_tx_path(self); + hw_atl_a0_hw_init_rx_path(self); + + hw_atl_a0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + reg_tx_dma_debug_ctl_set(self, 0x800000b8U); + reg_tx_dma_debug_ctl_set(self, 0x000000b8U); + + hw_atl_a0_hw_qos_set(self); + hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
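+ /* The second index of aq_hw_atl_igcr_table_ picks the
+ * single-vector or multi-vector variant of the global interrupt
+ * control value for the chosen IRQ type.
+ */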
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | + ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | + ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); + + hw_atl_a0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_A0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_A0_TXD_CTL_CMD_TCP | + HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_a0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + 
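+ /* The Rx data buffer size register is presumably programmed in
+ * 1 KiB units, hence AQ_CFG_RX_FRAME_MAX / 1024.
+ */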
aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_A0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */ + if ((1U << 4) & + reg_rx_dma_desc_status_get(self, ring->idx)) { + rdm_rx_desc_en_set(self, false, ring->idx); + rdm_rx_desc_res_set(self, true, ring->idx); + rdm_rx_desc_res_set(self, false, ring->idx); + rdm_rx_desc_en_set(self, true, ring->idx); + } + + if (ring->hw_head || + (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) { + break; + } else if (!(rxd_wb->status & 0x1U)) { + struct hw_atl_rxd_wb_s *rxd_wb1 = + (struct hw_atl_rxd_wb_s *) + (&ring->dx_ring[(1U) * + HW_ATL_A0_RXD_SIZE]); + + if 
((rxd_wb1->status & 0x1U)) { + rxd_wb->pkt_len = 1514U; + rxd_wb->status = 3U; + } else { + break; + } + } + } + + buff = &ring->buff_ring[ring->hw_head]; + + if (0x3U != (rxd_wb->status & 0x3U)) + rxd_wb->status |= 4; + + is_err = (0x0000001CU & rxd_wb->status); + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; + } + + is_err &= ~0x18U; + is_err &= ~0x04U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + /* jumbo */ + buff->next = aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask) | + (1U << HW_ATL_A0_ERR_INT)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + if ((1U << 16) & reg_gen_irq_status_get(self)) + + atomic_inc(&PHAL_ATLANTIC_A0->dpc); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = + IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_A0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_A0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_A0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + if (self->aq_nic_cfg->itr != 0xFFFFU) { + u32 itr_ = (self->aq_nic_cfg->itr >> 1); + + itr_ = min(AQ_CFG_IRQ_MASK, itr_); + + PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | + (itr_ << 0x10); + } else { + u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); + + if (n < self->aq_link_status.mbps) { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } else { + static unsigned int hw_timers_tbl_[] = { + 0x01CU, /* 10Gbit */ + 0x039U, /* 5Gbit */ + 0x039U, /* 5Gbit 5GS */ + 0x073U, /* 2.5Gbit */ + 0x120U, /* 1Gbit */ + 0x1FFU, /* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_A0->itr_rx = + 0x80000000U | + (hw_timers_tbl_[speed_index] << 0x10U); + } + + aq_hw_write_reg(self, 0x00002A00U, 0x40000000U); + aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); + } + } else { + PHAL_ATLANTIC_A0->itr_rx = 0U; + } + + for (i = HW_ATL_A0_RINGS_MAX; i--;) + reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_a0_create, + .destroy = hw_atl_a0_destroy, + .get_hw_caps = hw_atl_a0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_a0_hw_set_speed, + .hw_init = hw_atl_a0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_a0_hw_reset, + .hw_start = hw_atl_a0_hw_start, + .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop, +
.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop, + .hw_stop = hw_atl_a0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_a0_hw_irq_enable, + .hw_irq_disable = hw_atl_a0_hw_irq_disable, + .hw_irq_read = hw_atl_a0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_a0_hw_rss_set, + .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 1U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h new file mode 100644 index 000000000000..6e1d527954c9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. + */ + +#ifndef HW_ATL_A0_H +#define HW_ATL_A0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_A0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h new file mode 100644 index 000000000000..1093ea18823a --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -0,0 +1,155 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific + * constants. 
+ */ + +#ifndef HW_ATL_A0_INTERNAL_H +#define HW_ATL_A0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_A0_MTU_JUMBO 9014U + +#define HW_ATL_A0_TX_RINGS 4U +#define HW_ATL_A0_RX_RINGS 4U + +#define HW_ATL_A0_RINGS_MAX 32U +#define HW_ATL_A0_TXD_SIZE 16U +#define HW_ATL_A0_RXD_SIZE 16U + +#define HW_ATL_A0_MAC 0U +#define HW_ATL_A0_MAC_MIN 1U +#define HW_ATL_A0_MAC_MAX 33U + +/* interrupts */ +#define HW_ATL_A0_ERR_INT 8U +#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU + +#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U +#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U +#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U + +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U +#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U +#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U +#define HW_ATL_A0_TXD_CTL_DD 0x00100000U +#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U + +#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U + +#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_A0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_A0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_A0_RATE_10G BIT(0) +#define HW_ATL_A0_RATE_5G BIT(1) +#define HW_ATL_A0_RATE_2G5 BIT(3) +#define HW_ATL_A0_RATE_1G BIT(4) +#define HW_ATL_A0_RATE_100M BIT(5) + +#define HW_ATL_A0_TXBUF_MAX 160U +#define HW_ATL_A0_RXBUF_MAX 320U + +#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U + +#define HW_ATL_A0_TC_MAX 1U +#define HW_ATL_A0_RSS_MAX 8U + +#define HW_ATL_A0_FW_SEMA_RAM 0x2U + +#define HW_ATL_A0_RXD_DD 0x1U +#define HW_ATL_A0_RXD_NCEA0 0x1U + +#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U + +#define HW_ATL_A0_UCP_0X370_REG 0x370U + +#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_A0_RSS_MAX, + .tcs = HW_ATL_A0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_A0_RXD_SIZE, + .rxds = 248U, + .txd_alignment = 1U, + .txd_size = HW_ATL_A0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_A0_TX_RINGS, + .rx_rings = HW_ATL_A0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_A0_RATE_10G | + HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_A0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_A0_INTERNAL_H */ diff 
--git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c new file mode 100644 index 000000000000..e7e694f693bd --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -0,0 +1,958 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_ring.h" +#include "hw_atl_b0.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_b0_internal.h" + +static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + return 0; +} + +static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func, + unsigned int port, + struct aq_hw_ops *ops) +{ + struct hw_atl_s *self = NULL; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) + goto err_exit; + + self->base.aq_pci_func = aq_pci_func; + + self->base.not_ff_addr = 0x10U; + +err_exit: + return (struct aq_hw_s *)self; +} + +static void hw_atl_b0_destroy(struct aq_hw_s *self) +{ + kfree(self); +} + +static int hw_atl_b0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + glb_glb_reg_res_dis_set(self, 1U); + pci_pci_reg_res_dis_set(self, 0U); + rx_rx_reg_res_dis_set(self, 0U); + tx_tx_reg_res_dis_set(self, 0U); + + HW_ATL_FLUSH(); + glb_soft_res_set(self, 1); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + itr_irq_reg_res_dis_set(self, 0U); + itr_res_irq_set(self, 1U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + + hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + bool is_rx_flow_control = false; + + /* TPS Descriptor rate init */ + tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_B0_TXBUF_MAX; + + tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 66U) / + 100U, tc); + tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + buff_size = HW_ATL_B0_RXBUF_MAX; + + rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + 
rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_nic_cfg_s *cfg = NULL; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + cfg = self->aq_nic_cfg; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + __swab32(rss_params->hash_secret_key[i]) : 0U; + rpf_rss_key_wr_data_set(self, key_data); + rpf_rss_key_addr_set(self, addr); + rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 i = 0U; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + int err = 0; + u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX * + HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + + memset(bitary, 0, sizeof(bitary)); + + for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) { + (*(u32 *)(bitary + ((i * 3U) / 16U))) |= + ((indirection_table[i] % num_rss_queues) << + ((i * 3U) & 0xFU)); + } + + for (i = AQ_DIMOF(bitary); i--;) { + rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + rpf_rss_redir_tbl_addr_set(self, i); + rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + if (err < 0) + goto err_exit; + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg) +{ + int err = 0; + unsigned int i; + + /* TX checksums offloads*/ + tpo_ipv4header_crc_offload_en_set(self, 1); + tpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* RX checksums offloads*/ + rpo_ipv4header_crc_offload_en_set(self, 1); + rpo_tcp_udp_crc_offload_en_set(self, 1); + if (err < 0) + goto err_exit; + + /* LSO offloads*/ + tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + if (err < 0) + goto err_exit; + +/* LRO offloads */ + { + unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + rpo_lro_max_num_of_descriptors_set(self, val, i); + + rpo_lro_time_base_divider_set(self, 0x61AU); + rpo_lro_inactive_interval_set(self, 0); + rpo_lro_max_coalescing_interval_set(self, 2); + + rpo_lro_qsessions_lim_set(self, 1U); + + rpo_lro_total_desc_lim_set(self, 2U); + + rpo_lro_patch_optimization_en_set(self, 0U); + + rpo_lro_min_pay_of_first_pkt_set(self, 10U); + + rpo_lro_pkt_lim_set(self, 1U); + + rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + } + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) +{ + thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? 
+ 0x00010000U : 0x00000000U); + tdm_tx_dca_en_set(self, 0U); + tdm_tx_dca_mode_set(self, 0U); + + tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + rpb_rpf_rx_traf_class_mode_set(self, 1U); + + /* Rx flow control */ + rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_B0_MAC_MAX; i--;) { + rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); + rpfl2unicast_flr_act_set(self, 1U, i); + } + + reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + rpf_vlan_outer_etht_set(self, 0x88A8U); + rpf_vlan_inner_etht_set(self, 0x8100U); + + if (cfg->vlan_id) { + rpf_vlan_flr_act_set(self, 1U, 0U); + rpf_vlan_id_flr_set(self, 0U, 0U); + rpf_vlan_flr_en_set(self, 0U, 0U); + + rpf_vlan_accept_untagged_packets_set(self, 1U); + rpf_vlan_untagged_act_set(self, 1U); + + rpf_vlan_flr_act_set(self, 1U, 1U); + rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); + rpf_vlan_flr_en_set(self, 1U, 1U); + } else { + rpf_vlan_prom_mode_en_set(self, 1); + } + + /* Rx Interrupts */ + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + aq_hw_write_reg(self, 0x00005040U, + IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U); + + rpfl2broadcast_flr_act_set(self, 1U); + rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + rdm_rx_dca_en_set(self, 0U); + rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_init(struct aq_hw_s *self, + struct aq_nic_cfg_s *aq_nic_cfg, + u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + + self->aq_nic_cfg = aq_nic_cfg; + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_B0->chip_features); + + hw_atl_b0_hw_init_tx_path(self); + hw_atl_b0_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + + hw_atl_b0_hw_qos_set(self); + hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + reg_gen_irq_map_set(self, + ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_start(struct aq_hw_s *self) +{ + tpb_tx_buff_en_set(self, 1); + rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; +} + +static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int frags) +{ + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_txd_s *txd = NULL; + unsigned int buff_pa_len = 0U; + unsigned int pkt_len = 0U; + unsigned int frag_count = 0U; + bool is_gso = false; + + buff = &ring->buff_ring[ring->sw_tail]; + pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; + + for (frag_count = 0; frag_count < frags; frag_count++) { + txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * + HW_ATL_B0_TXD_SIZE]; + txd->ctl = 0; + txd->ctl2 = 0; + txd->buf_addr = 0; + + buff = &ring->buff_ring[ring->sw_tail]; + + if (buff->is_txc) { + txd->ctl |= (buff->len_l3 << 31) | + (buff->len_l2 << 24) | + HW_ATL_B0_TXD_CTL_CMD_TCP | + HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; + txd->ctl2 |= (buff->mss << 16) | + (buff->len_l4 << 8) | + (buff->len_l3 >> 1); + + pkt_len -= (buff->len_l4 + + buff->len_l3 + + buff->len_l2); + is_gso = true; + } else { + buff_pa_len = buff->len; + + txd->buf_addr = buff->pa; + txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN & + ((u32)buff_pa_len << 4)); + txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD; + /* PAY_LEN */ + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14); + + if (is_gso) { + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO; + txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN; + } + + /* Tx checksum offloads */ + if (buff->is_ip_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO; + + if (buff->is_udp_cso || buff->is_tcp_cso) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO; + + if (unlikely(buff->is_eop)) { + txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + } + } + + ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); + } + + hw_atl_b0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + rdm_rx_desc_en_set(self, false, aq_ring->idx); + + rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + + reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); + + reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); + + rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->idx); + + rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + 
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_rx_set(self, true, aq_ring->idx); + + rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; + u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + + reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); + + reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); + + tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + + hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); + + /* Set Tx threshold */ + tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + + /* Mapping interrupt vector */ + itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + itr_irq_map_en_tx_set(self, true, aq_ring->idx); + + tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, + struct aq_ring_s *ring, + unsigned int sw_tail_old) +{ + for (; sw_tail_old != ring->sw_tail; + sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { + struct hw_atl_rxd_s *rxd = + (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * + HW_ATL_B0_RXD_SIZE]; + + struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; + + rxd->buf_addr = buff->pa; + rxd->hdr_addr = 0U; + } + + reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + int err = 0; + unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + + if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + struct device *ndev = aq_nic_get_dev(ring->aq_nic); + + for (; ring->hw_head != ring->sw_tail; + ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { + struct aq_ring_buff_s *buff = NULL; + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) + &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; + + unsigned int is_err = 1U; + unsigned int is_rx_check_sum_enabled = 0U; + unsigned int pkt_type = 0U; + + if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ + break; + } + + buff = &ring->buff_ring[ring->hw_head]; + + is_err = (0x0000003CU & rxd_wb->status); + + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); + is_err &= ~0x20U; /* exclude validity bit */ + + pkt_type = 0xFFU & (rxd_wb->type >> 4); + + if (is_rx_check_sum_enabled) { + if (0x0U == (pkt_type & 0x3U)) + buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + + if (0x4U == (pkt_type & 0x1CU)) + buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + else if (0x0U == (pkt_type & 0x1CU)) + buff->is_tcp_cso = buff->is_cso_err ? 
0U : 1U; + } + + is_err &= ~0x18U; + + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); + + if (is_err || rxd_wb->type & 0x1000U) { + /* status error or DMA error */ + buff->is_error = 1U; + } else { + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; + } + } + + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = (rxd_wb->pkt_len & + (AQ_CFG_RX_FRAME_MAX - 1U)); + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; + } else { + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; + } + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + atomic_inc(&PHAL_ATLANTIC_B0->dpc); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) + +static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + unsigned int i = 0U; + + rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); + + rpfl2_accept_all_mc_packets_set(self, + IS_FILTER_ENABLED(IFF_ALLMULTI)); + + rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + + self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); + + for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? 
+ 1U : 0U, i); + + return aq_hw_err_from_flags(self); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_CFG_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + int err = 0; + + if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; + self->aq_nic_cfg->mc_list_count < count; + ++self->aq_nic_cfg->mc_list_count) { + u32 i = self->aq_nic_cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_B0_MAC_MIN + i); + + rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_B0_MAC_MIN + i); + + rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_B0_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, + bool itr_enabled) +{ + unsigned int i = 0U; + + if (itr_enabled && self->aq_nic_cfg->itr) { + tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + tdm_tdm_intr_moder_en_set(self, 1U); + rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + rdm_rdm_intr_moder_en_set(self, 1U); + + PHAL_ATLANTIC_B0->itr_tx = 2U; + PHAL_ATLANTIC_B0->itr_rx = 2U; + + if (self->aq_nic_cfg->itr != 0xFFFFU) { + unsigned int max_timer = self->aq_nic_cfg->itr / 2U; + unsigned int min_timer = self->aq_nic_cfg->itr / 32U; + + max_timer = min(0x1FFU, max_timer); + min_timer = min(0xFFU, min_timer); + + PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; + PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; + PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; + } else { + static unsigned int hw_atl_b0_timers_table_tx_[][2] = { + {0xffU, 0xffU}, /* 10Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit */ + {0xffU, 0x1ffU}, /* 5Gbit 5GS */ + {0xffU, 0x1ffU}, /* 2.5Gbit */ + {0xffU, 0x1ffU}, /* 1Gbit */ + {0xffU, 0x1ffU}, /* 100Mbit */ + }; + + static unsigned int hw_atl_b0_timers_table_rx_[][2] = { + {0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + + unsigned int speed_index = + hw_atl_utils_mbps_2_speed_index( + self->aq_link_status.mbps); + + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_tx |= + hw_atl_b0_timers_table_tx_[speed_index] + [1] << 0x10U; /* set max timer value */ + + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [0] << 0x8U; /* set min timer value */ + PHAL_ATLANTIC_B0->itr_rx |= + hw_atl_b0_timers_table_rx_[speed_index] + [1] << 0x10U; /* set max timer value */ + } + } else { + tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + tdm_tdm_intr_moder_en_set(self, 0U); + rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + rdm_rdm_intr_moder_en_set(self, 0U); + PHAL_ATLANTIC_B0->itr_tx = 0U; + PHAL_ATLANTIC_B0->itr_rx = 0U; + } + + for (i = HW_ATL_B0_RINGS_MAX; i--;) { + reg_tx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_tx, i); + reg_rx_intr_moder_ctrl_set(self, + PHAL_ATLANTIC_B0->itr_rx, i); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_stop(struct aq_hw_s *self) +{ + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + return aq_hw_err_from_flags(self); +} + +static int 
hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, + struct aq_ring_s *ring) +{ + rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed) +{ + int err = 0; + + err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +static struct aq_hw_ops hw_atl_ops_ = { + .create = hw_atl_b0_create, + .destroy = hw_atl_b0_destroy, + .get_hw_caps = hw_atl_b0_get_hw_caps, + + .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, + .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, + .hw_get_link_status = hw_atl_utils_mpi_get_link_status, + .hw_set_link_speed = hw_atl_b0_hw_set_speed, + .hw_init = hw_atl_b0_hw_init, + .hw_deinit = hw_atl_utils_hw_deinit, + .hw_set_power = hw_atl_utils_hw_set_power, + .hw_reset = hw_atl_b0_hw_reset, + .hw_start = hw_atl_b0_hw_start, + .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, + .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, + .hw_stop = hw_atl_b0_hw_stop, + + .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_b0_hw_irq_enable, + .hw_irq_disable = hw_atl_b0_hw_irq_disable, + .hw_irq_read = hw_atl_b0_hw_irq_read, + + .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, + .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl_b0_hw_rss_set, + .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_get_regs = hw_atl_utils_hw_get_regs, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, +}; + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev) +{ + bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); + bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || + (pdev->device == HW_ATL_DEVICE_ID_D100) || + (pdev->device == HW_ATL_DEVICE_ID_D107) || + (pdev->device == HW_ATL_DEVICE_ID_D108) || + (pdev->device == HW_ATL_DEVICE_ID_D109)); + + bool is_rev_ok = (pdev->revision == 2U); + + return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h new file mode 100644 index 000000000000..a1e1bce6c1f3 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -0,0 +1,34 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. 
+ */ + +#ifndef HW_ATL_B0_H +#define HW_ATL_B0_H + +#include "../aq_common.h" + +#ifndef PCI_VENDOR_ID_AQUANTIA + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A +#define HW_ATL_DEVICE_ID_0001 0x0001 +#define HW_ATL_DEVICE_ID_D100 0xD100 +#define HW_ATL_DEVICE_ID_D107 0xD107 +#define HW_ATL_DEVICE_ID_D108 0xD108 +#define HW_ATL_DEVICE_ID_D109 0xD109 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" + +#endif + +struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev); + +#endif /* HW_ATL_B0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h new file mode 100644 index 000000000000..8bdee3ddd5a0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -0,0 +1,207 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific + * constants. + */ + +#ifndef HW_ATL_B0_INTERNAL_H +#define HW_ATL_B0_INTERNAL_H + +#include "../aq_common.h" + +#define HW_ATL_B0_MTU_JUMBO (16000U) +#define HW_ATL_B0_MTU 1514U + +#define HW_ATL_B0_TX_RINGS 4U +#define HW_ATL_B0_RX_RINGS 4U + +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_TXD_SIZE (16U) +#define HW_ATL_B0_RXD_SIZE (16U) + +#define HW_ATL_B0_MAC 0U +#define HW_ATL_B0_MAC_MIN 1U +#define HW_ATL_B0_MAC_MAX 33U + +/* UCAST/MCAST filters */ +#define HW_ATL_B0_UCAST_FILTERS_MAX 38 +#define HW_ATL_B0_MCAST_FILTERS_MAX 8 + +/* interrupts */ +#define HW_ATL_B0_ERR_INT 8U +#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000) +#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000) +#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000) + +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001) +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002) +#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0) +#define HW_ATL_B0_TXD_CTL_DD (0x00100000) +#define HW_ATL_B0_TXD_CTL_EOP (0x00200000) + +#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000) + +#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_B0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_B0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_B0_RATE_10G BIT(0) +#define HW_ATL_B0_RATE_5G BIT(1) +#define HW_ATL_B0_RATE_2G5 BIT(3) +#define HW_ATL_B0_RATE_1G BIT(4) +#define HW_ATL_B0_RATE_100M BIT(5) + +#define HW_ATL_B0_TXBUF_MAX 160U +#define HW_ATL_B0_RXBUF_MAX 320U + +#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U +#define HW_ATL_B0_RSS_HASHKEY_BITS 320U + +#define HW_ATL_B0_TCRSS_4_8 1 +#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_RSS_MAX 8U + +#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_RS_SLIP_ENABLED 0U + +/* (256k -1(max pay_len) - 54(header)) */ +#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U + +/* (256k -1(max pay_len) - 74(header)) */ +#define 
HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U + +#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U +#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU + +#define HW_ATL_B0_FW_SEMA_RAM 0x2U + +#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000) + +#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007) +#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008) +#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0) +#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000) +#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000) + +#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */ +#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */ +#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000) + +#define HW_ATL_B0_RXD_DD (0x1) +#define HW_ATL_B0_RXD_NCEA0 (0x1) + +#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F) +#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0) +#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000) +#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000) +#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000) + +#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001) +#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002) +#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C) +#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004) +#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008) +#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010) +#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0) +#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000) + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define HW_ATL_B0_UCP_0X370_REG (0x370) + +#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10) + +#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U + +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + +/* HW layer capabilities */ +static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { + .ports = 1U, + .is_64_dma = true, + .msix_irqs = 4U, + .irq_mask = ~0U, + .vecs = HW_ATL_B0_RSS_MAX, + .tcs = HW_ATL_B0_TC_MAX, + .rxd_alignment = 1U, + .rxd_size = HW_ATL_B0_RXD_SIZE, + .rxds = 8U * 1024U, + .txd_alignment = 1U, + .txd_size = HW_ATL_B0_TXD_SIZE, + .txds = 8U * 1024U, + .txhwb_alignment = 4096U, + .tx_rings = HW_ATL_B0_TX_RINGS, + .rx_rings = HW_ATL_B0_RX_RINGS, + .hw_features = NETIF_F_HW_CSUM | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_LRO, + .hw_priv_flags = IFF_UNICAST_FLT, + .link_speed_msk = (HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M), + .flow_control = true, + .mtu = HW_ATL_B0_MTU_JUMBO, + .mac_regs_count = 88, + .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED, +}; + +#endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c new file mode 100644 index 000000000000..3de651afa8c7 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -0,0 +1,1394 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation.
All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" +#include "../aq_hw_utils.h" + +/* global */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) +{ + aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem); +} + +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +{ + return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore)); +} + +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr, + glb_reg_res_dis_msk, + glb_reg_res_dis_shift, + glb_reg_res_dis); +} + +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +{ + aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk, + glb_soft_res_shift, soft_res); +} + +u32 glb_soft_res_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr, + glb_soft_res_msk, + glb_soft_res_shift); +} + +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr); +} + +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, glb_mif_id_adr); +} + +/* stats */ +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); +} + +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); +} + +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr); +} + +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); +} + +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr); +} + +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); +} + +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr); +} + +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); +} + +/* interrupt */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw) +{ + aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); +} + +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; 
+ +/* bitmask for bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register address for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, + 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + }; + +/* lower bit 
position of bitfield imr_rx{r}[4:0] */ + static u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, + 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); +} + +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +{ + aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); +} + +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr, + itr_reg_res_dsbl_msk, + itr_reg_res_dsbl_shift, irq_reg_res_dis); +} + +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw) +{ + aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); +} + +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, itr_isrlsw_adr); +} + +u32 itr_res_irq_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift); +} + +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +{ + aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk, + itr_res_shift, res_irq); +} + +/* rdm */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca), + rdm_dcadcpuid_msk, + rdm_dcadcpuid_shift, cpuid); +} + +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, + rdm_dca_en_shift, rx_dca_en); +} + +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, + rdm_dca_mode_shift, rx_dca_mode); +} + +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
rdm_descddata_size_adr(descriptor), + rdm_descddata_size_msk, + rdm_descddata_size_shift, + rx_desc_data_buff_size); +} + +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), + rdm_dcaddesc_en_msk, + rdm_dcaddesc_en_shift, + rx_desc_dca_en); +} + +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor), + rdm_descden_msk, + rdm_descden_shift, + rx_desc_en); +} + +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor), + rdm_descdhdr_size_msk, + rdm_descdhdr_size_shift, + rx_desc_head_buff_size); +} + +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor), + rdm_descdhdr_split_msk, + rdm_descdhdr_split_shift, + rx_desc_head_splitting); +} + +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor), + rdm_descdhd_msk, rdm_descdhd_shift); +} + +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor), + rdm_descdlen_msk, rdm_descdlen_shift, + rx_desc_len); +} + +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor), + rdm_descdreset_msk, rdm_descdreset_shift, + rx_desc_res); +} + +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr, + rdm_int_desc_wrb_en_msk, + rdm_int_desc_wrb_en_shift, + rx_desc_wr_wb_irq_en); +} + +void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca), + rdm_dcadhdr_en_msk, + rdm_dcadhdr_en_shift, + rx_head_dca_en); +} + +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca), + rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + rx_pld_dca_en); +} + +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en) +{ + aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr, + rdm_int_rim_en_msk, + rdm_int_rim_en_shift, + rdm_intr_moder_en); +} + +/* reg */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx) +{ + aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); +} + +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, gen_intr_stat_adr); +} + +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +{ + aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); +} + +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +{ + aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr); +} + +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + rx_dma_desc_base_addrlsw); +} + +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + 
return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor)); +} + +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + rx_dma_desc_tail_ptr); +} + +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); +} + +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); +} + +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1) +{ + aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); +} + +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2) +{ + aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2); +} + +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue), + rx_intr_moderation_ctl); +} + +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl) +{ + aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); +} + +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + tx_dma_desc_base_addrlsw); +} + +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + tx_dma_desc_base_addrmsw); +} + +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, u32 descriptor) +{ + aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + tx_dma_desc_tail_ptr); +} + +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, + rpb_dma_sys_lbk_msk, + rpb_dma_sys_lbk_shift, dma_sys_lbk); +} + +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr, + rpb_rpf_rx_tc_mode_msk, + rpb_rpf_rx_tc_mode_shift, + rx_traf_class_mode); +} + +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, + rpb_rx_buf_en_shift, rx_buff_en); +} + +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer), + rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + rx_buff_hi_threshold_per_tc); +} + +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer), + rpb_rxblo_thresh_msk, + rpb_rxblo_thresh_shift, + rx_buff_lo_threshold_per_tc); +} + +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr, + rpb_rx_fc_mode_msk, + rpb_rx_fc_mode_shift, rx_flow_ctl_mode); +} + +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, 
rpb_rxbbuf_size_adr(buffer), + rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + rx_pkt_buff_size_per_tc); +} + +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer), + rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr, + rpfl2bc_thresh_msk, + rpfl2bc_thresh_shift, + l2broadcast_count_threshold); +} + +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, + rpfl2bc_en_shift, l2broadcast_en); +} + +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, + rpfl2bc_act_shift, l2broadcast_flr_act); +} + +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter), + rpfl2mc_enf_msk, + rpfl2mc_enf_shift, l2multicast_flr_en); +} + +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr, + rpfl2promis_mode_msk, + rpfl2promis_mode_shift, + l2promiscuous_mode_en); +} + +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter), + rpfl2uc_actf_msk, rpfl2uc_actf_shift, + l2unicast_flr_act); +} + +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter), + rpfl2uc_enf_msk, + rpfl2uc_enf_shift, l2unicast_flr_en); +} + +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter), + l2unicast_dest_addresslsw); +} + +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter), + rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + l2unicast_dest_addressmsw); +} + +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr, + rpfl2mc_accept_all_msk, + rpfl2mc_accept_all_shift, + l2_accept_all_mc_packets); +} + +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, + 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, + rpf_rss_key_addr_msk, + rpf_rss_key_addr_shift, + rss_key_addr); +} + +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 
rss_key_wr_data) +{ + aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, + rss_key_wr_data); +} + +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift); +} + +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + rpf_rss_key_wr_eni_msk, + rpf_rss_key_wr_eni_shift, + rss_key_wr_en); +} + +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr, + rpf_rss_redir_addr_msk, + rpf_rss_redir_addr_shift, rss_redir_tbl_addr); +} + +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr, + rpf_rss_redir_wr_data_msk, + rpf_rss_redir_wr_data_shift, + rss_redir_tbl_wr_data); +} + +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift); +} + +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, + rpf_rss_redir_wr_eni_msk, + rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); +} + +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr, + rpf_tpo_rpf_sys_lbk_msk, + rpf_tpo_rpf_sys_lbk_shift, + tpo_to_rpf_sys_lbk); +} + +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr, + rpf_vl_inner_tpid_msk, + rpf_vl_inner_tpid_shift, + vlan_inner_etht); +} + +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr, + rpf_vl_outer_tpid_msk, + rpf_vl_outer_tpid_shift, + vlan_outer_etht); +} + +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr, + rpf_vl_promis_mode_msk, + rpf_vl_promis_mode_shift, + vlan_prom_mode_en); +} + +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr, + rpf_vl_accept_untagged_mode_msk, + rpf_vl_accept_untagged_mode_shift, + vlan_accept_untagged_packets); +} + +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr, + rpf_vl_untagged_act_msk, + rpf_vl_untagged_act_shift, + vlan_untagged_act); +} + +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter), + rpf_vl_en_f_msk, + rpf_vl_en_f_shift, + vlan_flr_en); +} + +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter), + rpf_vl_act_f_msk, + rpf_vl_act_f_shift, + vlan_flr_act); +} + +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter), + rpf_vl_id_f_msk, + rpf_vl_id_f_shift, + vlan_id_flr); +} + +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter), + rpf_et_enf_msk, + rpf_et_enf_shift, etht_flr_en); +} + +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + 
aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter), + rpf_et_upfen_msk, rpf_et_upfen_shift, + etht_user_priority_en); +} + +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter), + rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + etht_rx_queue_en); +} + +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter), + rpf_et_upf_msk, + rpf_et_upf_shift, etht_user_priority); +} + +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter), + rpf_et_rxqf_msk, + rpf_et_rxqf_shift, etht_rx_queue); +} + +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter), + rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + etht_mgt_queue); +} + +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter), + rpf_et_actf_msk, + rpf_et_actf_shift, etht_flr_act); +} + +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter), + rpf_et_valf_msk, + rpf_et_valf_shift, etht_flr); +} + +/* RPO: rx packet offload */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr, + rpo_ipv4chk_en_msk, + rpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor), + rpo_descdvl_strip_msk, + rpo_descdvl_strip_shift, + rx_desc_vlan_stripping); +} + +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk, + rpol4chk_en_shift, tcp_udp_crc_offload_en); +} + +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +{ + aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en); +} + +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr, + rpo_lro_ptopt_en_msk, + rpo_lro_ptopt_en_shift, + lro_patch_optimization_en); +} + +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr, + rpo_lro_qses_lmt_msk, + rpo_lro_qses_lmt_shift, + lro_qsessions_lim); +} + +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr, + rpo_lro_tot_dsc_lmt_msk, + rpo_lro_tot_dsc_lmt_shift, + lro_total_desc_lim); +} + +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, + rpo_lro_pkt_min_msk, + rpo_lro_pkt_min_shift, + lro_min_pld_of_first_pkt); +} + +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +{ + aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim); +} + +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 
0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr, + rpo_lro_tb_div_msk, + rpo_lro_tb_div_shift, + lro_time_base_divider); +} + +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr, + rpo_lro_ina_ival_msk, + rpo_lro_ina_ival_shift, + lro_inactive_interval); +} + +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval) +{ + aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr, + rpo_lro_max_ival_msk, + rpo_lro_max_ival_shift, + lro_max_coalescing_interval); +} + +/* rx */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr, + rx_reg_res_dsbl_msk, + rx_reg_res_dsbl_shift, + rx_reg_res_dis); +} + +/* tdm */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca), + tdm_dcadcpuid_msk, + tdm_dcadcpuid_shift, cpuid); +} + +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) +{ + aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en); +} + +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, + tdm_dca_en_shift, tx_dca_en); +} + +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, + tdm_dca_mode_shift, tx_dca_mode); +} + +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca), + tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + tx_desc_dca_en); +} + +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor), + tdm_descden_msk, + tdm_descden_shift, + tx_desc_en); +} + +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor), + tdm_descdhd_msk, tdm_descdhd_shift); +} + +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, 
tdm_descdlen_adr(descriptor), + tdm_descdlen_msk, + tdm_descdlen_shift, + tx_desc_len); +} + +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr, + tdm_int_desc_wrb_en_msk, + tdm_int_desc_wrb_en_shift, + tx_desc_wr_wb_irq_en); +} + +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor), + tdm_descdwrb_thresh_msk, + tdm_descdwrb_thresh_shift, + tx_desc_wr_wb_threshold); +} + +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) +{ + aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr, + tdm_int_mod_en_msk, + tdm_int_mod_en_shift, + tdm_irq_moderation_en); +} + +/* thm */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr, + thm_lso_tcp_flag_first_msk, + thm_lso_tcp_flag_first_shift, + lso_tcp_flag_of_first_pkt); +} + +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr, + thm_lso_tcp_flag_last_msk, + thm_lso_tcp_flag_last_shift, + lso_tcp_flag_of_last_pkt); +} + +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr, + thm_lso_tcp_flag_mid_msk, + thm_lso_tcp_flag_mid_shift, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, + tpb_tx_buf_en_shift, tx_buff_en); +} + +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer), + tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + tx_buff_hi_threshold_per_tc); +} + +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer), + tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + tx_buff_lo_threshold_per_tc); +} + +void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr, + tpb_dma_sys_lbk_msk, + tpb_dma_sys_lbk_shift, + tx_dma_sys_lbk_en); +} + +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer), + tpb_txbbuf_size_msk, + tpb_txbbuf_size_shift, + tx_pkt_buff_size_per_tc); +} + +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) +{ + aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr, + tpb_tx_scp_ins_en_msk, + tpb_tx_scp_ins_en_shift, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr, + tpo_ipv4chk_en_msk, + tpo_ipv4chk_en_shift, + ipv4header_crc_offload_en); +} + +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr, + tpol4chk_en_msk, + tpol4chk_en_shift, + tcp_udp_crc_offload_en); +} + +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, 
tpo_pkt_sys_lbk_adr, + tpo_pkt_sys_lbk_msk, + tpo_pkt_sys_lbk_shift, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr, + tps_data_tc_arb_mode_msk, + tps_data_tc_arb_mode_shift, + tx_pkt_shed_data_arb_mode); +} + +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr, + tps_desc_rate_ta_rst_msk, + tps_desc_rate_ta_rst_shift, + curr_time_res); +} + +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr, + tps_desc_rate_lim_msk, + tps_desc_rate_lim_shift, + tx_pkt_shed_desc_rate_lim); +} + +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr, + tps_desc_tc_arb_mode_msk, + tps_desc_tc_arb_mode_shift, + tx_pkt_shed_desc_tc_arb_mode); +} + +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc), + tps_desc_tctcredit_max_msk, + tps_desc_tctcredit_max_shift, + tx_pkt_shed_desc_tc_max_credit); +} + +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc), + tps_desc_tctweight_msk, + tps_desc_tctweight_shift, + tx_pkt_shed_desc_tc_weight); +} + +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr, + tps_desc_vm_arb_mode_msk, + tps_desc_vm_arb_mode_shift, + tx_pkt_shed_desc_vm_arb_mode); +} + +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc), + tps_data_tctcredit_max_msk, + tps_data_tctcredit_max_shift, + tx_pkt_shed_tc_data_max_credit); +} + +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc), + tps_data_tctweight_msk, + tps_data_tctweight_shift, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr, + tx_reg_res_dsbl_msk, + tx_reg_res_dsbl_shift, tx_reg_res_dis); +} + +/* msm */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr, + msm_reg_access_busy_msk, + msm_reg_access_busy_shift); +} + +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, + msm_reg_addr_msk, + msm_reg_addr_shift, + reg_addr_for_indirect_addr); +} + +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, + msm_reg_rd_strobe_msk, + msm_reg_rd_strobe_shift, + reg_rd_strobe); +} + +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr); +} + +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +{ + aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data); +} + +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 
reg_wr_strobe) +{ + aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr, + msm_reg_wr_strobe_msk, + msm_reg_wr_strobe_shift, + reg_wr_strobe); +} + +/* pci */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr, + pci_reg_res_dsbl_msk, + pci_reg_res_dsbl_shift, + pci_reg_res_dis); +} + +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + glb_cpu_scratch_scp); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h new file mode 100644 index 000000000000..ed1085b95adb --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -0,0 +1,677 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. + */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include <linux/types.h> + +struct aq_hw_s; + +/* global */ + +/* set global microprocessor semaphore */ +void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); + +/* get global microprocessor semaphore */ +u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); + +/* set global register reset disable */ +void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 glb_soft_res_get(struct aq_hw_s *aq_hw); + +/* stats */ + +u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter msw */ +u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter msw */ +u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter msw */ +u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter msw */ +u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get msm rx errors counter register */ +u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx multicast frames counter register */ +u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* 
get rx dma statistics counter 7 */ +u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); + +/* get msm tx errors counter register */ +u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get global mif identification */ +u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw); + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx); + +/* set interrupt mapping enable tx */ +void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx); + +/* set interrupt mapping rx */ +void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); + +/* get reset interrupt */ +u32 itr_res_irq_get(struct aq_hw_s *aq_hw); + +/* set reset interrupt */ +void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); + +/* rdm */ + +/* set cpu id */ +void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void 
rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca); + +/* set rx descriptor header buffer size */ +void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx); + +/* get general interrupt status register */ +u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw); + +/* set interrupt global control register */ +void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); + +/* set rx filter multicast filter mask register */ +void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* set global microprocessor scratch pad */ +void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, u32 scratch_scp); + +/* rpb */ + +/* set dma system loopback */ +void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); + +/* set rx buffer enable */ +void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, 
u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode enable */ +void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); + +/* get rss key write enable */ +u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss key write enable */ +void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss redirection write enable */ +void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en); 
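+
+/* Usage sketch (editorial, not part of the original patch): the RSS key
+ * and redirection-table accessors declared earlier in this header follow
+ * an address/data/strobe idiom. Latching one redirection entry would look
+ * roughly like the following; the assumption that hardware clears the
+ * write-enable strobe on completion, and the 10us poll step, are
+ * illustrative rather than taken from this patch:
+ *
+ *	rpf_rss_redir_tbl_wr_data_set(aq_hw, entry_data);
+ *	rpf_rss_redir_tbl_addr_set(aq_hw, entry_index);
+ *	rpf_rss_redir_wr_en_set(aq_hw, 1U);
+ *	while (rpf_rss_redir_wr_en_get(aq_hw))
+ *		udelay(10);
+ */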
+ +/* Set VLAN untagged action */ +void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_accept_untagged_packets); + +/* Set VLAN filter enable */ +void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter); + +/* Set VLAN Filter Action */ +void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); + +/* set ethertype filter enable */ +void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); + +/* set ethertype user-priority enable */ +void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter); + +/* set ethertype rx queue enable */ +void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. 
*/ +void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim); + +/* Set LRO Max Number of Descriptors */ +void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, u32 lro); + +/* Set LRO Time Base Divider */ +void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); + +/* Set LRO Inactive Interval */ +void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); + +/* Set LRO Max Coalescing Interval */ +void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coalescing_interval); + +/* rx */ + +/* set rx register reset disable */ +void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor); + +/* set tx dca enable */ +void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca); + +/* get tx descriptor head pointer */ +u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); + +/* thm */ + +/* set lso tcp flag of first packet */ +void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set tx buffer enable */ +void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); + +/* set tx buffer high threshold (per tc) */ +void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable */ +void 
tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); + +/* set tx path pad insert enable */ +void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_vm_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw); + +/* set register address for indirect address */ +void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw); + +/* set register write data */ +void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); + +#endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h new file mode 100644 index 000000000000..5527fc0e5942 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -0,0 +1,2375 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 
aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_llh_internal.h: Preprocessor definitions + * for Atlantic registers. + */ + +#ifndef HW_ATL_LLH_INTERNAL_H +#define HW_ATL_LLH_INTERNAL_H + +/* global microprocessor semaphore definitions + * base address: 0x000003a0 + * parameter: semaphore {s} | stride size 0x4 | range [0, 15] + */ +#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +/* register address for bitfield rx dma good octet counter lsw [1f:0] */ +#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +/* register address for bitfield rx dma good packet counter lsw [1f:0] */ +#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +/* register address for bitfield tx dma good octet counter lsw [1f:0] */ +#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +/* register address for bitfield tx dma good packet counter lsw [1f:0] */ +#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 + +/* register address for bitfield rx dma good octet counter msw [3f:20] */ +#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +/* register address for bitfield rx dma good packet counter msw [3f:20] */ +#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +/* register address for bitfield tx dma good octet counter msw [3f:20] */ +#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +/* register address for bitfield tx dma good packet counter msw [3f:20] */ +#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 + +/* preprocessor definitions for msm rx errors counter register */ +#define mac_msm_rx_errs_cnt_adr 0x00000120u + +/* preprocessor definitions for msm rx unicast frames counter register */ +#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u + +/* preprocessor definitions for msm rx multicast frames counter register */ +#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u + +/* preprocessor definitions for msm rx broadcast frames counter register */ +#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u + +/* preprocessor definitions for msm rx broadcast octets counter register 1 */ +#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u + +/* preprocessor definitions for msm rx broadcast octets counter register 2 */ +#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u + +/* preprocessor definitions for msm rx unicast octets counter register 0 */ +#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u + +/* preprocessor definitions for rx dma statistics counter 7 */ +#define rx_dma_stat_counter7_adr 0x00006818u + +/* preprocessor definitions for msm tx unicast frames counter register */ +#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u + +/* preprocessor definitions for msm tx multicast frames counter register */ +#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u + +/* preprocessor definitions for global mif identification */ +#define glb_mif_id_adr 0x0000001cu + +/* register address for bitfield iamr_lsw[1f:0] */ +#define itr_iamrlsw_adr 0x00002090 +/* register address for bitfield rx dma drop packet counter [1f:0] */ +#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define itr_imcrlsw_adr 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define itr_imsrlsw_adr 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_adr 
0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_msk 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define itr_reg_res_dsbl_shift 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define itr_iscrlsw_adr 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define itr_isrlsw_adr 0x00002000 +/* register address for bitfield itr_reset */ +#define itr_res_adr 0x00002300 +/* bitmask for bitfield itr_reset */ +#define itr_res_msk 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define itr_res_shift 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_msk 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define rdm_dcadcpuid_shift 0 + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define rdm_dca_en_adr 0x00006180 +/* bitmask for bitfield dca_en */ +#define rdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define rdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define rdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define rdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define rdm_dca_en_default 0x1 + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_adr 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define rdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define rdm_dca_mode_default 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_msk 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_mskn 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_shift 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_width 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define rdm_descddata_size_default 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en".
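+ * usage sketch (illustrative, not part of this patch): assuming the + * read-modify-write helper aq_hw_write_reg_bit() from aq_hw_utils.h, + * per-ring descriptor dca would be enabled for ring "dca" with: + *   aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), + *                       rdm_dcaddesc_en_msk, rdm_dcaddesc_en_shift, 1U);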
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define rdm_dcaddesc_en_default 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define rdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define rdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define rdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define rdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define rdm_descden_default 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_msk 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_mskn 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_shift 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_width 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define rdm_descdhdr_size_default 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_msk 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_mskn 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_shift 28 +/* width of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_width 1 +/* default value of bitfield desc{d}_hdr_split */ +#define rdm_descdhdr_split_default 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". 
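+ * usage sketch (illustrative, not part of this patch): the head pointer + * is a read-only output, so it would be extracted with the mask/shift + * pair, assuming aq_hw_read_reg() from aq_hw_utils.h: + *   hd = (aq_hw_read_reg(aq_hw, rdm_descdhd_adr(d)) & rdm_descdhd_msk) + *        >> rdm_descdhd_shift;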
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define rdm_descdhd_width 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define rdm_descdlen_default 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_q_pf_res_i[0]" + */ + +/* register address for bitfield desc{d}_reset */ +#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_msk 0x02000000 +/* inverted bitmask for bitfield desc{d}_reset */ +#define rdm_descdreset_mskn 0xfdffffff +/* lower bit position of bitfield desc{d}_reset */ +#define rdm_descdreset_shift 25 +/* width of bitfield desc{d}_reset */ +#define rdm_descdreset_width 1 +/* default value of bitfield desc{d}_reset */ +#define rdm_descdreset_default 0x0 + +/* rx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". + * port="pif_rdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_adr 0x00005a30 +/* bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_msk 0x00000004 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_mskn 0xfffffffb +/* lower bit position of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_shift 2 +/* width of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define rdm_int_desc_wrb_en_default 0x0 + +/* rx dca{d}_hdr_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_hdr_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_hdr_en_i[0]" + */ + +/* register address for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_msk 0x40000000 +/* inverted bitmask for bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_mskn 0xbfffffff +/* lower bit position of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_shift 30 +/* width of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_width 1 +/* default value of bitfield dca{d}_hdr_en */ +#define rdm_dcadhdr_en_default 0x0 + +/* rx dca{d}_pay_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_pay_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_msk 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_mskn 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_shift 29 +/* width of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_width 1 +/* default value of bitfield dca{d}_pay_en */ +#define rdm_dcadpay_en_default 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". + * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_adr 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_msk 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_mskn 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_shift 3 +/* Width of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_width 1 +/* Default value of bitfield rdm_int_rim_en */ +#define rdm_int_rim_en_default 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A4 + */ + +#define gen_intr_stat_adr 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define intr_glb_ctl_adr 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx
dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define rx_flr_mcst_flr_msk_adr 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define rx_flr_rss_control1_adr 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define rx_flr_control2_adr 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_adr 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_msk 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_shift 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_width 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define tdm_tx_dma_debug_ctl_default 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_base_addrlsw_adr(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". 
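+ * usage sketch (illustrative, not part of this patch): assuming the + * aq_hw_write_reg_bit() helper from aq_hw_utils.h, system loopback would + * be toggled with: + *   aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, rpb_dma_sys_lbk_msk, + *                       rpb_dma_sys_lbk_shift, 1U);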
+ * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_adr 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define rpb_dma_sys_lbk_default 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_adr 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_msk 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_shift 8 +/* width of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_width 1 +/* default value of bitfield rx_tc_mode */ +#define rpb_rpf_rx_tc_mode_default 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". + * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define rpb_rx_buf_en_adr 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define rpb_rx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define rpb_rx_buf_en_shift 0 +/* width of bitfield rx_buf_en */ +#define rpb_rx_buf_en_width 1 +/* default value of bitfield rx_buf_en */ +#define rpb_rx_buf_en_default 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_msk 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_mskn 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_shift 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_width 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define rpb_rxbhi_thresh_default 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_msk 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_mskn 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_shift 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_width 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define rpb_rxblo_thresh_default 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_adr 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_msk 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_mskn 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_shift 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_width 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define rpb_rx_fc_mode_default 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_msk 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_mskn 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_shift 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_width 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define rpb_rxbbuf_size_default 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_msk 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_mskn 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_shift 31 +/* width of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_width 1 +/* default value of bitfield rx{b}_xoff_en */ +#define rpb_rxbxoff_en_default 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". 
+ * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_adr 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_msk 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_mskn 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_shift 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_width 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define rpfl2bc_thresh_default 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define rpfl2bc_en_adr 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_msk 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define rpfl2bc_en_mskn 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define rpfl2bc_en_shift 0 +/* width of bitfield l2_bc_en */ +#define rpfl2bc_en_width 1 +/* default value of bitfield l2_bc_en */ +#define rpfl2bc_en_default 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". + * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_adr 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_msk 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_mskn 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_shift 12 +/* width of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_width 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define rpfl2bc_act_default 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_shift 31 +/* width of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_width 1 +/* default value of bitfield l2_mc_en{f} */ +#define rpfl2mc_enf_default 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define rpfl2promis_mode_adr 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_msk 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define rpfl2promis_mode_mskn 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define rpfl2promis_mode_shift 3 +/* width of bitfield l2_promis_mode */ +#define rpfl2promis_mode_width 1 +/* default value of bitfield l2_promis_mode */ +#define rpfl2promis_mode_default 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_msk 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_mskn 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_shift 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_width 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define rpfl2uc_actf_default 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_msk 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_mskn 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_shift 31 +/* width of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_width 1 +/* default value of bitfield l2_uc_en{f} */ +#define rpfl2uc_enf_default 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_msk 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define rpfl2uc_dafmsw_shift 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". + * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_adr 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_msk 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_shift 14 +/* Width of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_width 1 +/* Default value of bitfield l2_mc_accept_all */ +#define rpfl2mc_accept_all_default 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_width 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define rpf_rpb_rx_tc_upt_default 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_adr 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_msk 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_mskn 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_shift 0 +/* width of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_width 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define rpf_rss_key_addr_default 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". 
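+ * usage sketch (illustrative, not part of this patch): programming one + * 32-bit word of the rss key is a data/address/strobe sequence; "i" and + * "key_word" are placeholders, and aq_hw_write_reg()/aq_hw_write_reg_bit() + * are assumed from aq_hw_utils.h: + *   aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, key_word); + *   aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, + *                       rpf_rss_key_addr_msk, rpf_rss_key_addr_shift, i); + *   aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, + *                       rpf_rss_key_wr_eni_msk, rpf_rss_key_wr_eni_shift, + *                       1U); + * a caller would typically poll the strobe back to zero before writing + * the next word.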
+ * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_adr 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_mskn 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_shift 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_width 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define rpf_rss_key_wr_data_default 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". + * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_adr 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_msk 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_mskn 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_shift 5 +/* width of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_width 1 +/* default value of bitfield rss_key_wr_en_i */ +#define rpf_rss_key_wr_eni_default 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". + * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_adr 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_msk 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_mskn 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_shift 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_width 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define rpf_rss_redir_addr_default 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_adr 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_msk 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_mskn 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_shift 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_width 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define rpf_rss_redir_wr_data_default 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". 
+ * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_adr 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_msk 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_mskn 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_shift 4 +/* width of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_width 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define rpf_rss_redir_wr_eni_default 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". + * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_shift 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_width 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define rpf_tpo_rpf_sys_lbk_default 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_adr 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_msk 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_mskn 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_shift 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_width 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define rpf_vl_inner_tpid_default 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_adr 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_msk 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_mskn 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_shift 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_width 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define rpf_vl_outer_tpid_default 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". 
+ * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_adr 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_msk 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_mskn 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_shift 1 +/* width of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_width 1 +/* default value of bitfield vl_promis_mode */ +#define rpf_vl_promis_mode_default 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". + * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_adr 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_msk 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_shift 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_width 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define rpf_vl_accept_untagged_mode_default 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_adr 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_msk 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_shift 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_width 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define rpf_vl_untagged_act_default 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define rpf_vl_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define rpf_vl_en_f_shift 31 +/* Width of bitfield vl_en{F} */ +#define rpf_vl_en_f_width 1 +/* Default value of bitfield vl_en{F} */ +#define rpf_vl_en_f_default 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_msk 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_mskn 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_shift 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_width 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define rpf_vl_act_f_default 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_msk 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_mskn 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_shift 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_width 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define rpf_vl_id_f_default 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_msk 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define rpf_et_en_f_mskn 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define rpf_et_en_f_shift 31 +/* Width of bitfield et_en{F} */ +#define rpf_et_en_f_width 1 +/* Default value of bitfield et_en{F} */ +#define rpf_et_en_f_default 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define rpf_et_enf_msk 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define rpf_et_enf_mskn 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define rpf_et_enf_shift 31 +/* width of bitfield et_en{f} */ +#define rpf_et_enf_width 1 +/* default value of bitfield et_en{f} */ +#define rpf_et_enf_default 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_msk 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define rpf_et_upfen_mskn 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define rpf_et_upfen_shift 30 +/* width of bitfield et_up{f}_en */ +#define rpf_et_upfen_width 1 +/* default value of bitfield et_up{f}_en */ +#define rpf_et_upfen_default 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_msk 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_mskn 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_shift 29 +/* width of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_width 1 +/* default value of bitfield et_rxq{f}_en */ +#define rpf_et_rxqfen_default 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_msk 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define rpf_et_upf_mskn 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_shift 26 +/* width of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_width 3 +/* default value of bitfield et_up{f}[2:0] */ +#define rpf_et_upf_default 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_msk 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_mskn 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_shift 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_width 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define rpf_et_rxqf_default 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_msk 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_mskn 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_shift 19 +/* width of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_width 1 +/* default value of bitfield et_mng_rxq{f} */ +#define rpf_et_mng_rxqf_default 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_msk 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define rpf_et_actf_mskn 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_shift 16 +/* width of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_width 3 +/* default value of bitfield et_act{f}[2:0] */ +#define rpf_et_actf_default 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_msk 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define rpf_et_valf_mskn 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_shift 0 +/* width of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_width 16 +/* default value of bitfield et_val{f}[f:0] */ +#define rpf_et_valf_default 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_adr 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define rpo_ipv4chk_en_default 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_msk 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_mskn 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_shift 29 +/* width of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_width 1 +/* default value of bitfield desc{d}_vl_strip */ +#define rpo_descdvl_strip_default 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define rpol4chk_en_adr 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define rpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define rpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define rpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define rpol4chk_en_default 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". 
+ * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_adr 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define rx_reg_res_dsbl_default 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_msk 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_mskn 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_shift 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_width 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define tdm_dcadcpuid_default 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". + * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define tdm_lso_en_adr 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_msk 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define tdm_lso_en_mskn 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define tdm_lso_en_shift 0 +/* width of bitfield lso_en[1f:0] */ +#define tdm_lso_en_width 32 +/* default value of bitfield lso_en[1f:0] */ +#define tdm_lso_en_default 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define tdm_dca_en_adr 0x00008480 +/* bitmask for bitfield dca_en */ +#define tdm_dca_en_msk 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define tdm_dca_en_mskn 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define tdm_dca_en_shift 31 +/* width of bitfield dca_en */ +#define tdm_dca_en_width 1 +/* default value of bitfield dca_en */ +#define tdm_dca_en_default 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_adr 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_msk 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define tdm_dca_mode_mskn 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_shift 0 +/* width of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_width 4 +/* default value of bitfield dca_mode[3:0] */ +#define tdm_dca_mode_default 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". 
+ * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_msk 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_mskn 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_shift 31 +/* width of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_width 1 +/* default value of bitfield dca{d}_desc_en */ +#define tdm_dcaddesc_en_default 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define tdm_descden_msk 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define tdm_descden_mskn 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define tdm_descden_shift 31 +/* width of bitfield desc{d}_en */ +#define tdm_descden_width 1 +/* default value of bitfield desc{d}_en */ +#define tdm_descden_default 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_msk 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_mskn 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_shift 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define tdm_descdhd_width 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_msk 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_mskn 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_shift 3 +/* width of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_width 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define tdm_descdlen_default 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
+ * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_adr 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_msk 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_mskn 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_shift 1 +/* width of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_width 1 +/* default value of bitfield int_desc_wrb_en */ +#define tdm_int_desc_wrb_en_default 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_msk 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_mskn 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_shift 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_width 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define tdm_descdwrb_thresh_default 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". + * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_shift 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_width 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define thm_lso_tcp_flag_first_default 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". + * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_adr 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_msk 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_mskn 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_shift 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_width 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define thm_lso_tcp_flag_last_default 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". 
+ * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_adr 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_mskn 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_shift 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_width 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define rpo_lro_rsc_max_default 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define rpo_lro_en_adr 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_msk 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define rpo_lro_en_mskn 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define rpo_lro_en_shift 0 +/* Width of bitfield lro_en[1F:0] */ +#define rpo_lro_en_width 32 +/* Default value of bitfield lro_en[1F:0] */ +#define rpo_lro_en_default 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". + * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_adr 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_msk 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_shift 15 +/* Width of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_width 1 +/* Default value of bitfield lro_ptopt_en */ +#define rpo_lro_ptopt_en_defalt 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_msk 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_shift 12 +/* Width of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_width 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define rpo_lro_qses_lmt_default 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". + * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_shift 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_width 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define rpo_lro_tot_dsc_lmt_defalt 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". 
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_adr 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_msk 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_shift 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_width 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define rpo_lro_pkt_min_default 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_width 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define rpo_lro_ldes_max_default 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". + * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_adr 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_msk 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_mskn 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_shift 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_width 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define rpo_lro_tb_div_default 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_adr 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_msk 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_mskn 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_shift 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_width 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define rpo_lro_ina_ival_default 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". + * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_adr 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_msk 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_mskn 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_shift 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_width 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define rpo_lro_max_ival_default 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". 
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_msk 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_shift 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_width 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define tdm_dca_dcpuid_default 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_msk 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_shift 31 +/* Width of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_width 1 +/* Default value of bitfield dca{D}_desc_en */ +#define tdm_dca_ddesc_en_default 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_msk 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define tdm_desc_den_mskn 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define tdm_desc_den_shift 31 +/* Width of bitfield desc{D}_en */ +#define tdm_desc_den_width 1 +/* Default value of bitfield desc{D}_en */ +#define tdm_desc_den_default 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_msk 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_mskn 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_shift 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define tdm_desc_dhd_width 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". 
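The parameterized variants fold the instance index into the address with the documented stride, so tdm_dca_dcpuid_adr(3) expands to 0x8400 + 3 * 0x4 = 0x840c. Since dca{D}_cpuid and dca{D}_desc_en share the same per-ring register, one read-modify-write can program both; a sketch under the same assumptions as above, with a hypothetical helper name.

/* Hypothetical illustration: steer TX descriptor DCA for ring 'dca'
 * to CPU 'cpu' and enable it, in a single write to 0x8400 + dca * 0x4.
 */
static void tdm_tx_dca_sketch(struct aq_hw_s *self, u32 dca, u32 cpu)
{
	u32 val = aq_hw_read_reg(self, tdm_dca_dcpuid_adr(dca));

	val &= tdm_dca_dcpuid_mskn;	/* clear the old CPU id */
	val |= (cpu << tdm_dca_dcpuid_shift) & tdm_dca_dcpuid_msk;
	val |= tdm_dca_ddesc_en_msk;	/* bit 31: per-ring DCA enable */
	aq_hw_write_reg(self, tdm_dca_dcpuid_adr(dca), val);
}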
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_msk 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_mskn 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_shift 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_width 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define tdm_desc_dlen_default 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_adr(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_msk 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_shift 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_width 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define tdm_desc_dwrb_thresh_default 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_adr 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_msk 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_mskn 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_shift 4 +/* Width of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_width 1 +/* Default value of bitfield tdm_int_mod_en */ +#define tdm_int_mod_en_default 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". + * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_adr 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_shift 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_width 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define thm_lso_tcp_flag_mid_default 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". 
+ * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define tpb_tx_buf_en_adr 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_msk 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define tpb_tx_buf_en_mskn 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define tpb_tx_buf_en_shift 0 +/* width of bitfield tx_buf_en */ +#define tpb_tx_buf_en_width 1 +/* default value of bitfield tx_buf_en */ +#define tpb_tx_buf_en_default 0x0 + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_msk 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_mskn 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_shift 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_width 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define tpb_txbhi_thresh_default 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_msk 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_mskn 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_shift 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_width 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define tpb_txblo_thresh_default 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_adr 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_msk 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_mskn 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_shift 6 +/* width of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_width 1 +/* default value of bitfield dma_sys_loopback */ +#define tpb_dma_sys_lbk_default 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". 
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_msk 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_mskn 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_shift 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_width 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define tpb_txbbuf_size_default 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_adr 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_msk 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_mskn 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_shift 2 +/* width of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_width 1 +/* default value of bitfield tx_scp_ins_en */ +#define tpb_tx_scp_ins_en_default 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_adr 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_msk 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_mskn 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_shift 1 +/* width of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_width 1 +/* default value of bitfield ipv4_chk_en */ +#define tpo_ipv4chk_en_default 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define tpol4chk_en_adr 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_msk 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define tpol4chk_en_mskn 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define tpol4chk_en_shift 0 +/* width of bitfield l4_chk_en */ +#define tpol4chk_en_width 1 +/* default value of bitfield l4_chk_en */ +#define tpol4chk_en_default 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". + * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_adr 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_msk 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_mskn 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_shift 7 +/* width of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_width 1 +/* default value of bitfield pkt_sys_loopback */ +#define tpo_pkt_sys_lbk_default 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". 
+ * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_adr 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_shift 0 +/* width of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_width 1 +/* default value of bitfield data_tc_arb_mode */ +#define tps_data_tc_arb_mode_default 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_adr 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_msk 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_mskn 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_shift 31 +/* width of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_width 1 +/* default value of bitfield desc_rate_ta_rst */ +#define tps_desc_rate_ta_rst_default 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". + * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_adr 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_msk 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_mskn 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_shift 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_width 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define tps_desc_rate_lim_default 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". + * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_adr 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_msk 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_mskn 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_shift 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_width 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define tps_desc_tc_arb_mode_default 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_shift 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_width 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define tps_desc_tctcredit_max_default 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_shift 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_width 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define tps_desc_tctweight_default 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". + * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_adr 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_msk 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_mskn 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_shift 0 +/* width of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_width 1 +/* default value of bitfield desc_vm_arb_mode */ +#define tps_desc_vm_arb_mode_default 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_msk 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_mskn 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_shift 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_width 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define tps_data_tctcredit_max_default 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_msk 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_mskn 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_shift 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_width 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define tps_data_tctweight_default 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_adr 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define tx_reg_res_dsbl_default 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". + * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define msm_reg_access_busy_adr 0x00004400 +/* bitmask for bitfield register access busy */ +#define msm_reg_access_busy_msk 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define msm_reg_access_busy_mskn 0xffffefff +/* lower bit position of bitfield register access busy */ +#define msm_reg_access_busy_shift 12 +/* width of bitfield register access busy */ +#define msm_reg_access_busy_width 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". + * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define msm_reg_addr_adr 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_msk 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define msm_reg_addr_mskn 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define msm_reg_addr_shift 0 +/* width of bitfield msm register address[7:0] */ +#define msm_reg_addr_width 8 +/* default value of bitfield msm register address[7:0] */ +#define msm_reg_addr_default 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". 
+ * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define msm_reg_rd_strobe_adr 0x00004400 +/* bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_msk 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define msm_reg_rd_strobe_mskn 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define msm_reg_rd_strobe_shift 9 +/* width of bitfield register read strobe */ +#define msm_reg_rd_strobe_width 1 +/* default value of bitfield register read strobe */ +#define msm_reg_rd_strobe_default 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_adr 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_mskn 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_shift 0 +/* width of bitfield msm register read data[31:0] */ +#define msm_reg_rd_data_width 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". + * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_adr 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_msk 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_mskn 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_shift 0 +/* width of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_width 32 +/* default value of bitfield msm register write data[31:0] */ +#define msm_reg_wr_data_default 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". + * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define msm_reg_wr_strobe_adr 0x00004400 +/* bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_msk 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define msm_reg_wr_strobe_mskn 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define msm_reg_wr_strobe_shift 8 +/* width of bitfield register write strobe */ +#define msm_reg_wr_strobe_width 1 +/* default value of bitfield register write strobe */ +#define msm_reg_wr_strobe_default 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define glb_soft_res_adr 0x00000000 +/* bitmask for bitfield soft reset */ +#define glb_soft_res_msk 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define glb_soft_res_mskn 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define glb_soft_res_shift 15 +/* width of bitfield soft reset */ +#define glb_soft_res_width 1 +/* default value of bitfield soft reset */ +#define glb_soft_res_default 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". 
+ * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define glb_reg_res_dis_adr 0x00000000 +/* bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_msk 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define glb_reg_res_dis_mskn 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define glb_reg_res_dis_shift 14 +/* width of bitfield register reset disable */ +#define glb_reg_res_dis_width 1 +/* default value of bitfield register reset disable */ +#define glb_reg_res_dis_default 0x1 + +/* tx dma debug control definitions */ +#define tx_dma_debug_ctl_adr 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define tx_dma_desc_base_addrmsw_adr(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_adr 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_msk 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_mskn 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_shift 29 +/* width of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_width 1 +/* default value of bitfield reg_res_dsbl */ +#define pci_reg_res_dsbl_default 0x1 + +/* global microprocessor scratch pad definitions */ +#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) + +#endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c new file mode 100644 index 000000000000..8d6d8f5804da --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -0,0 +1,570 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware + * abstraction layer. 
+ */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_pci_func.h" +#include "../aq_ring.h" +#include "../aq_vec.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#include <linux/random.h> + +#define HW_ATL_UCP_0X370_REG 0x0370U + +#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_MPI_STATE_ADR 0x036CU + +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_MPI_SPEED_SHIFT 16U + +static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x00000200U, 0x00008000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + + *(p++) = aq_hw_read_reg(self, 0x0000020CU); + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt) +{ + int err = 0; + bool is_locked; + + is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIME; + goto err_exit; + } + + aq_hw_write_reg(self, 0x00000208U, a); + + for (++cnt; --cnt;) { + u32 i = 0U; + + aq_hw_write_reg(self, 0x0000020CU, *(p++)); + aq_hw_write_reg(self, 0x00000200U, 0xC000U); + + for (i = 1024U; + (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + } + } + + reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +{ + int err = 0; + const u32 dw_major_mask = 0xff000000U; + const u32 dw_minor_mask = 0x00ffffffU; + + err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; + if (err < 0) + goto err_exit; + err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ -EOPNOTSUPP : 0; +err_exit: + return err; +} + +static int hw_atl_utils_init_ucp(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + if (!aq_hw_read_reg(self, 0x370U)) { + unsigned int rnd = 0U; + unsigned int ucp_0x370 = 0U; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + + err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, + aq_hw_read_reg(self, 0x18U)); + return err; +} + +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + +struct aq_hw_atl_utils_fw_rpc_tid_s { + union { + u32 val; + struct { + u16 tid; + u16 len; + }; + }; +}; + +#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) + +static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + + if (!IS_CHIP_FEATURE(MIPS)) { + err = -1; + goto err_exit; + } + err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *)&PHAL_ATLANTIC->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); + if (err < 0) + goto err_exit; + + sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); + sw.len = (u16)rpc_size; + aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); + +err_exit: + return err; +} + +static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + struct aq_hw_atl_utils_fw_rpc_tid_s fw; + + do { + sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); + + PHAL_ATLANTIC->rpc_tid = sw.tid; + + AQ_HW_WAIT_FOR(sw.tid == + (fw.val = + aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), + fw.tid), 1000U, 100U); + if (err < 0) + goto err_exit; + + if (fw.len == 0xFFFFU) { + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; + } + } while (sw.tid != fw.tid || 0xFFFFU == fw.len); + if (err < 0) + goto err_exit; + + if (rpc) { + if (fw.len) { + err = + hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->rpc_addr, + (u32 *)(void *) + &PHAL_ATLANTIC->rpc, + (fw.len + sizeof(u32) - + sizeof(u8)) / + sizeof(u32)); + if (err < 0) + goto err_exit; + } + + *rpc = &PHAL_ATLANTIC->rpc; + } + +err_exit: + return err; +} + +static int hw_atl_utils_mpi_create(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps) +{ + int err = 0; + + err = hw_atl_utils_init_ucp(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + err = hw_atl_utils_fw_rpc_init(self); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox) +{ + int err = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + PHAL_ATLANTIC->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); + if (err < 0) + goto err_exit; + + if (pmbox != &PHAL_ATLANTIC->mbox) + memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); + + if (IS_CHIP_FEATURE(REVISION_A0)) { + unsigned int mtu = self->aq_nic_cfg ? 
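hw_atl_utils_ver_match() above gates on the firmware version in two steps: the major byte must match exactly (the XOR against dw_major_mask), and the expected minor part must not exceed the actual one. A worked example with illustrative version numbers; that register 0x18 holds the firmware version is taken from hw_atl_utils_get_fw_version() later in this file.

/* Illustrative only: expected 0x01020005 vs actual 0x01020003 shares
 * major byte 0x01, but expected minor 0x020005 > actual 0x020003, so
 * this returns -EOPNOTSUPP; an actual of 0x02xxxxxx would already
 * fail the major-byte check.
 */
static int fw_ver_check_example(struct aq_hw_s *self)
{
	u32 actual = aq_hw_read_reg(self, 0x18U);	/* FW version reg */

	return hw_atl_utils_ver_match(0x01020005U, actual);
}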
+ self->aq_nic_cfg->mtu : 1514U; + pmbox->stats.ubrc = pmbox->stats.uprc * mtu; + pmbox->stats.ubtc = pmbox->stats.uptc * mtu; + pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); + } else { + pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); + } + +err_exit:; +} + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state) +{ + u32 ucp_0x368 = 0; + + ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); + + return 0; +} + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, u32 speed) +{ + int err = 0; + u32 transaction_id = 0; + + if (state == MPI_RESET) { + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + transaction_id = PHAL_ATLANTIC->mbox.transaction_id; + + AQ_HW_WAIT_FOR(transaction_id != + (hw_atl_utils_mpi_read_stats + (self, &PHAL_ATLANTIC->mbox), + PHAL_ATLANTIC->mbox.transaction_id), + 1000U, 100U); + if (err < 0) + goto err_exit; + } + + err = hw_atl_utils_mpi_set_speed(self, speed, state); + +err_exit:; +} + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status) +{ + u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + + if (!link_speed_mask) { + link_status->mbps = 0U; + } else { + switch (link_speed_mask) { + case HAL_ATLANTIC_RATE_10G: + link_status->mbps = 10000U; + break; + + case HAL_ATLANTIC_RATE_5G: + case HAL_ATLANTIC_RATE_5GSR: + link_status->mbps = 5000U; + break; + + case HAL_ATLANTIC_RATE_2GS: + link_status->mbps = 2500U; + break; + + case HAL_ATLANTIC_RATE_1G: + link_status->mbps = 1000U; + break; + + case HAL_ATLANTIC_RATE_100M: + link_status->mbps = 100U; + break; + + default: + link_status->mbps = 0U; + break; + } + } + + return 0; +} + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2]; + + self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); + + hw_atl_utils_hw_chip_features_init(self, + &PHAL_ATLANTIC_A0->chip_features); + + err = hw_atl_utils_mpi_create(self, aq_hw_caps); + if (err < 0) + goto err_exit; + + if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { + unsigned int rnd = 0; + unsigned int ucp_0x370 = 0; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + err = hw_atl_utils_fw_downld_dwords(self, + aq_hw_read_reg(self, 0x00000374U) + + (40U * 4U), + mac_addr, + AQ_DIMOF(mac_addr)); + if (err < 0) { + mac_addr[0] = 0U; + mac_addr[1] = 0U; + err = 0; + } else { + mac_addr[0] = __swab32(mac_addr[0]); + mac_addr[1] = __swab32(mac_addr[1]); + } + + ether_addr_copy(mac, (u8 *)mac_addr); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + +err_exit: + return err; +} + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) +{ + unsigned int ret = 0U; + + switch (mbps) { + case 100U: + ret = 5U; + break; + + case 1000U: + ret = 4U; + break; + + case 2500U: + ret = 3U; + break; + + 
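hw_atl_utils_mpi_set_speed() above composes the MPI control word from a rate bitmap in the upper half and the MPI state in the lower half. A self-contained equivalent of one call, using only identifiers defined in this series:

/* Illustrative only: same effect as hw_atl_utils_mpi_set_speed(self,
 * HAL_ATLANTIC_RATE_10G, MPI_INIT), i.e. control word 0x00010002.
 */
static void mpi_force_10g_example(struct aq_hw_s *self)
{
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
			(HAL_ATLANTIC_RATE_10G << HW_ATL_MPI_SPEED_SHIFT) |
			MPI_INIT);
}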
case 5000U: + ret = 1U; + break; + + case 10000U: + ret = 0U; + break; + + default: + break; + } + return ret; +} + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) +{ + u32 chip_features = 0U; + u32 val = reg_glb_mif_id_get(self); + u32 mif_rev = val & 0xFFU; + + if ((3U & mif_rev) == 1U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS; + } else if ((3U & mif_rev) == 2U) { + chip_features |= + HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } + + *p = chip_features; +} + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +{ + hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + return 0; +} + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state) +{ + hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + return 0; +} + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, unsigned int *p_count) +{ + struct hw_atl_stats_s *stats = NULL; + int i = 0; + + hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); + + stats = &PHAL_ATLANTIC->mbox.stats; + + data[i] = stats->uprc + stats->mprc + stats->bprc; + data[++i] = stats->uprc; + data[++i] = stats->mprc; + data[++i] = stats->bprc; + data[++i] = stats->erpt; + data[++i] = stats->uptc + stats->mptc + stats->bptc; + data[++i] = stats->uptc; + data[++i] = stats->mptc; + data[++i] = stats->bptc; + data[++i] = stats->ubrc; + data[++i] = stats->ubtc; + data[++i] = stats->mbrc; + data[++i] = stats->mbtc; + data[++i] = stats->bbrc; + data[++i] = stats->bbtc; + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); + data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); + data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); + data[++i] = stats->dpc; + + if (p_count) + *p_count = ++i; + + return 0; +} + +static const u32 hw_atl_utils_hw_mac_regs[] = { + 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, + 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, + 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, + 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, + 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, + 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, + 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, + 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, + 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, + 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, + 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, + 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, + 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, + 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, + 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, + 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, + 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, + 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, + 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, + 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, + 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, + 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, +}; + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff) +{ + unsigned int i = 0U; + + for (i = 0; i < aq_hw_caps->mac_regs_count; i++) + regs_buff[i] = 
aq_hw_read_reg(self, + hw_atl_utils_hw_mac_regs[i]); + return 0; +} + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) +{ + *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h new file mode 100644 index 000000000000..b8e3d88f0879 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -0,0 +1,210 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware + * abstraction layer. + */ + +#ifndef HW_ATL_UTILS_H +#define HW_ATL_UTILS_H + +#include "../aq_common.h" + +#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } + +struct __packed hw_atl_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 dpc; +}; + +union __packed ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +}; + +struct __packed hw_aq_atl_utils_fw_rpc { + u32 msg_id; + + union { + struct { + u32 pong; + } msg_ping; + + struct { + u8 mac_addr[6]; + u32 ip_addr_cnt; + + struct { + union ip_addr addr; + union ip_addr mask; + } ip[1]; + } msg_arp; + + struct { + u32 len; + u8 packet[1514U]; + } msg_inject; + + struct { + u32 priority; + u32 wol_packet_type; + u16 friendly_name_len; + u16 friendly_name[65]; + u32 pattern_id; + u32 next_wol_pattern_offset; + + union { + struct { + u32 flags; + u8 ipv4_source_address[4]; + u8 ipv4_dest_address[4]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv4_tcp_syn_parameters; + + struct { + u32 flags; + u8 ipv6_source_address[16]; + u8 ipv6_dest_address[16]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv6_tcp_syn_parameters; + + struct { + u32 flags; + } eapol_request_id_message_parameters; + + struct { + u32 flags; + u32 mask_offset; + u32 mask_size; + u32 pattern_offset; + u32 pattern_size; + } wol_bit_map_pattern; + } wol_pattern; + } msg_wol; + + struct { + u32 is_wake_on_link_down; + u32 is_wake_on_link_up; + } msg_wolink; + }; +}; + +struct __packed hw_aq_atl_utils_mbox { + u32 version; + u32 transaction_id; + int error; + struct hw_atl_stats_s stats; +}; + +struct __packed hw_atl_s { + struct aq_hw_s base; + struct hw_aq_atl_utils_mbox mbox; + u64 speed; + u32 itr_tx; + u32 itr_rx; + unsigned int chip_features; + u32 fw_ver_actual; + atomic_t dpc; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; +}; + +#define SELF ((struct hw_atl_s *)self) + +#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self))) +#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self))) + +#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U +#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U +#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U +#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U + +#define IS_CHIP_FEATURE(_F_) 
(HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ + PHAL_ATLANTIC->chip_features) + +enum hal_atl_utils_fw_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +#define HAL_ATLANTIC_RATE_10G BIT(0) +#define HAL_ATLANTIC_RATE_5G BIT(1) +#define HAL_ATLANTIC_RATE_5GSR BIT(2) +#define HAL_ATLANTIC_RATE_2GS BIT(3) +#define HAL_ATLANTIC_RATE_1G BIT(4) +#define HAL_ATLANTIC_RATE_100M BIT(5) +#define HAL_ATLANTIC_RATE_INVALID BIT(6) + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox); + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, + u32 speed); + +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, + enum hal_atl_utils_fw_state_e state); + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, + struct aq_hw_link_status_s *link_status); + +int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u8 *mac); + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff); + +int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, + struct ethtool_cmd *cmd); + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state); + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self); + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); + +int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, + u64 *data, + unsigned int *p_count); + +#endif /* HW_ATL_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h new file mode 100644 index 000000000000..0de858d215c2 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -0,0 +1,18 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#ifndef VER_H +#define VER_H + +#define NIC_MAJOR_DRIVER_VERSION 1 +#define NIC_MINOR_DRIVER_VERSION 5 +#define NIC_BUILD_DRIVER_VERSION 345 +#define NIC_REVISION_DRIVER_VERSION 0 + +#endif /* VER_H */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index abc9f2a59054..23873395f100 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 7dcc907a449d..6a27c2662675 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget) if (!tx_complete || work == budget) return budget; - napi_complete(&np->napi); + napi_complete_done(&np->napi, work); /* enable interrupt */ if (alx->flags & ALX_FLAG_USING_MSIX) { @@ -1648,8 +1648,8 @@ static void alx_poll_controller(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *net_stats) +static void alx_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *net_stats) { struct alx_priv *alx = netdev_priv(dev); struct alx_hw_stats *hw_stats = &alx->hw.stats; @@ -1693,8 +1693,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; spin_unlock(&alx->stats_lock); - - return net_stats; } static const struct net_device_ops alx_netdev_ops = { @@ -1823,6 +1821,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 773d3b7d8dd5..7e913d8331c3 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget) if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); adapter->hw.intr_mask |= ISR_RX_PKT; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index e96091b652a7..4f7e195af0bc 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1472,7 +1472,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, prrs->vtag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - netif_receive_skb(skb); + napi_gro_receive(&adapter->napi, skb); skip_pkt: /* skip current packet whether it's ok or not. 
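Each of these hunks is the same conversion: report how much work was actually done via napi_complete_done() instead of the bare napi_complete(), so the NAPI core can make better busy-polling and interrupt-deferral decisions. The canonical poll() shape they converge on, sketched with hypothetical xyz_* driver hooks:

/* Sketch only; 'xyz_priv', xyz_clean_rx() and xyz_enable_irqs() stand
 * in for the per-driver pieces.
 */
static int xyz_poll(struct napi_struct *napi, int budget)
{
	struct xyz_priv *priv = container_of(napi, struct xyz_priv, napi);
	int work_done = xyz_clean_rx(priv, budget);	/* RX work this pass */

	if (work_done >= budget)
		return budget;		/* stay scheduled, keep polling */

	napi_complete_done(napi, work_done);
	xyz_enable_irqs(priv);		/* re-arm device interrupts */

	return work_done;
}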
*/ @@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: - napi_complete(napi); + napi_complete_done(napi, work_done); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 7dad8e4b9d2a..022772e1e249 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) if (work_done >= budget) return work_done; - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable Interrupt */ if (likely(adapter->int_enabled)) atlx_imr_set(adapter, IMR_NORMAL_MASK); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 48707ed76ffc..5b95bb48ce97 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); b44_enable_ints(bp); } @@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev) return 0; } -static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *nstat) +static void b44_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *nstat) { struct b44 *bp = netdev_priv(dev); struct b44_hw_stats *hwstat = &bp->hw_stats; @@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, #endif } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); - return nstat; } static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c483618b57bd..0ee6e208aa07 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) /* no more packet in rx/tx queue, remove device from poll * queue */ - napi_complete(napi); + napi_complete_done(napi, rx_work_done); /* restore rx/tx interrupt */ enet_dmac_writel(priv, priv->dma_chan_int_mask, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 744ed6ddaf37..a68d4889f5db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \ BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET); BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); -BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); +/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact + * same layout, except it has been moved by 4 bytes up, *sigh* + */ +static inline u32 rdma_readl(struct bcm_sysport_priv *priv, 
u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off) +{ + if (priv->is_lite && off >= RDMA_STATUS) + off += 4; + __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off); +} + +static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit) +{ + if (!priv->is_lite) { + return BIT(bit); + } else { + if (bit >= ACB_ALGO) + return BIT(bit + 1); + else + return BIT(bit); + } +} + /* L2-interrupt masking/unmasking helpers, does automatic saving of the applied * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. */ @@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev, priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); reg = tdma_readl(priv, TDMA_CONTROL); if (priv->tsb_en) - reg |= TSB_EN; + reg |= tdma_control_bit(priv, TSB_EN); else - reg &= ~TSB_EN; + reg &= ~tdma_control_bit(priv, TSB_EN); tdma_writel(priv, reg, TDMA_CONTROL); return 0; @@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) priv->msg_enable = enable; } +static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) +{ + switch (type) { + case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_RXCHK: + case BCM_SYSPORT_STAT_RBUF: + case BCM_SYSPORT_STAT_SOFT: + return true; + default: + return false; + } +} + static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) { + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + unsigned int i, j; + switch (string_set) { case ETH_SS_STATS: - return BCM_SYSPORT_STATS_LEN; + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + j++; + } + return j; default: return -EOPNOTSUPP; } @@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) static void bcm_sysport_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - int i; + struct bcm_sysport_priv *priv = netdev_priv(dev); + const struct bcm_sysport_stats *s; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sysport_gstrings_stats[i].stat_string, + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + s = &bcm_sysport_gstrings_stats[i]; + if (priv->is_lite && + !bcm_sysport_lite_stat_valid(s->type)) + continue; + + memcpy(data + j * ETH_GSTRING_LEN, s->stat_string, ETH_GSTRING_LEN); + j++; } break; default: @@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: case BCM_SYSPORT_STAT_RUNT: + if (priv->is_lite) + continue; + if (s->type != BCM_SYSPORT_STAT_MIB_RX) offset = UMAC_MIB_STAT_OFFSET; val = umac_readl(priv, UMAC_MIB_START + j + offset); @@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); - int i; + int i, j; if (netif_running(dev)) bcm_sysport_update_mib_counters(priv); - for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; char *p; @@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += 
s->stat_offset; - data[i] = *(unsigned long *)p; + data[j] = *(unsigned long *)p; + j++; } } @@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, u16 len, status; struct bcm_rsb *rsb; - /* Determine how much we should process since last call */ - p_index = rdma_readl(priv, RDMA_PROD_INDEX); + /* Determine how much we should process since last call, SYSTEMPORT Lite + * groups the producer and consumer indexes into the same 32-bit register, + * which we access using RDMA_CONS_INDEX + */ + if (!priv->is_lite) + p_index = rdma_readl(priv, RDMA_PROD_INDEX); + else + p_index = rdma_readl(priv, RDMA_CONS_INDEX); p_index &= RDMA_PROD_INDEX_MASK; if (p_index < priv->rx_c_index) @@ -791,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ - intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + if (!ring->priv->is_lite) + intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + else + intrl2_0_mask_clear(ring->priv, BIT(ring->index + + INTRL2_0_TDMA_MBDONE_SHIFT)); return 0; } @@ -817,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) priv->rx_c_index += work_done; priv->rx_c_index &= RDMA_CONS_INDEX_MASK; - rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + + /* SYSTEMPORT Lite groups the producer/consumer index, producer is + * maintained by HW, but writes to it will be ignored while RDMA + * is active + */ + if (!priv->is_lite) + rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + else + rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); if (work_done < budget) { napi_complete_done(napi, work_done); @@ -848,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *txr; + unsigned int ring, ring_bit; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -877,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) bcm_sysport_resume_from_wol(priv); } + if (!priv->is_lite) + goto out; + + for (ring = 0; ring < dev->num_tx_queues; ring++) { + ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT); + if (!(priv->irq0_stat & ring_bit)) + continue; + + txr = &priv->tx_rings[ring]; + + if (likely(napi_schedule_prep(&txr->napi))) { + intrl2_0_mask_set(priv, ring_bit); + __napi_schedule(&txr->napi); + } + } +out: return IRQ_HANDLED; } @@ -930,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev) bcm_sysport_rx_isr(priv->irq0, priv); enable_irq(priv->irq0); - disable_irq(priv->irq1); - bcm_sysport_tx_isr(priv->irq1, priv); - enable_irq(priv->irq1); + if (!priv->is_lite) { + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); + } } #endif @@ -1129,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) priv->old_duplex = phydev->duplex; } + if (priv->is_lite) + goto out; + switch (phydev->speed) { case SPEED_2500: cmd_bits = CMD_SPEED_2500; @@ -1169,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev) reg |= cmd_bits; umac_writel(priv, reg, UMAC_CMD); } - - phy_print_status(phydev); +out: + if (changed) + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
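/*
 * Standalone model of the Lite index handling in bcm_sysport_desc_rx() and
 * bcm_sysport_poll() above: one 32-bit register carries the HW-maintained
 * producer index in its low half and the driver-published consumer index
 * in its high half, hence the "<< 16" on the consumer write. The 0xffff
 * mask is assumed to match RDMA_PROD_INDEX_MASK.
 */
#include <assert.h>
#include <stdint.h>

#define INDEX_MASK 0xffff	/* assumed RDMA_PROD_INDEX_MASK value */

int main(void)
{
	uint32_t reg = 0x0019;			/* HW bumped producer to 0x19 */
	uint16_t prod = reg & INDEX_MASK;	/* driver samples producer */
	uint16_t cons = prod;			/* ...and consumes everything */

	reg = (reg & INDEX_MASK) | ((uint32_t)cons << 16); /* publish cons */
	assert(prod == 0x19 && reg == 0x00190019);
	return 0;
}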
@@ -1315,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, reg = tdma_readl(priv, TDMA_CONTROL); if (enable) - reg |= TDMA_EN; + reg |= tdma_control_bit(priv, TDMA_EN); else - reg &= ~TDMA_EN; + reg &= ~tdma_control_bit(priv, TDMA_EN); tdma_writel(priv, reg, TDMA_CONTROL); /* Poll for TDMA disabling completion */ @@ -1342,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) int i; /* Initialize SW view of the RX ring */ - priv->num_rx_bds = NUM_RX_DESC; + priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; priv->rx_c_index = 0; priv->rx_read_ptr = 0; @@ -1379,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_START_ADDR_HI); rdma_writel(priv, 0, RDMA_START_ADDR_LO); rdma_writel(priv, 0, RDMA_END_ADDR_HI); - rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); rdma_writel(priv, 1, RDMA_MBDONE_INTR); @@ -1421,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); if (dev->flags & IFF_PROMISC) reg |= CMD_PROMISC; @@ -1438,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, { u32 reg; - reg = umac_readl(priv, UMAC_CMD); - if (enable) - reg |= mask; - else - reg &= ~mask; - umac_writel(priv, reg, UMAC_CMD); + if (!priv->is_lite) { + reg = umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_writel(priv, reg, UMAC_CMD); + } else { + reg = gib_readl(priv, GIB_CONTROL); + if (enable) + reg |= mask; + else + reg &= ~mask; + gib_writel(priv, reg, GIB_CONTROL); + } /* UniMAC stops on a packet boundary, wait for a full-sized packet * to be processed (1 msec). @@ -1456,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv) { u32 reg; + if (priv->is_lite) + return; + reg = umac_readl(priv, UMAC_CMD); reg |= CMD_SW_RESET; umac_writel(priv, reg, UMAC_CMD); @@ -1468,9 +1589,17 @@ static void umac_set_hw_addr(struct bcm_sysport_priv *priv, unsigned char *addr) { - umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | - (addr[2] << 8) | addr[3], UMAC_MAC0); - umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + if (!priv->is_lite) { + umac_writel(priv, mac0, UMAC_MAC0); + umac_writel(priv, mac1, UMAC_MAC1); + } else { + gib_writel(priv, mac0, GIB_MAC0); + gib_writel(priv, mac1, GIB_MAC1); + } } static void topctrl_flush(struct bcm_sysport_priv *priv) @@ -1515,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev) phy_start(dev->phydev); - /* Enable TX interrupts for the 32 TXQs */ - intrl2_1_mask_clear(priv, 0xffffffff); + /* Enable TX interrupts for the TXQs */ + if (!priv->is_lite) + intrl2_1_mask_clear(priv, 0xffffffff); + else + intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); /* Last call before we start the real business */ netif_tx_start_all_queues(dev); @@ -1528,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv) reg = rbuf_readl(priv, RBUF_CONTROL); reg |= RBUF_4B_ALGN | RBUF_RSB_EN; + /* Set a correct RSB format on SYSTEMPORT Lite */ + if (priv->is_lite) { + reg &= ~RBUF_RSB_SWAP1; + reg |= RBUF_RSB_SWAP0; + } rbuf_writel(priv, reg, RBUF_CONTROL); } +static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv) +{ + intrl2_0_mask_set(priv, 0xffffffff); +
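/*
 * Standalone check of the MAC packing used by umac_set_hw_addr() above:
 * both UniMAC (UMAC_MAC0/1) and the Lite GIB block (GIB_MAC0/1) take the
 * same two-word layout, which is why the refactor computes mac0/mac1 once
 * and only picks the destination registers afterwards.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t mac0 = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3];
	uint32_t mac1 = (addr[4] << 8) | addr[5];

	assert(mac0 == 0x001018aa);	/* first four octets */
	assert(mac1 == 0x0000bbcc);	/* last two octets */
	return 0;
}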
intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + if (!priv->is_lite) { + intrl2_1_mask_set(priv, 0xffffffff); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + } +} + +static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) +{ + u32 __maybe_unused reg; + + /* Include Broadcom tag in pad extension */ + if (netdev_uses_dsa(priv->netdev)) { + reg = gib_readl(priv, GIB_CONTROL); + reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); + reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; + gib_writel(priv, reg, GIB_CONTROL); + } +} + static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1551,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); /* Read CRC forward */ - priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + if (!priv->is_lite) + priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + else + priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); @@ -1572,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev) priv->old_pause = -1; /* mask all interrupts and request them */ - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + bcm_sysport_mask_all_intrs(priv); ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); if (ret) { @@ -1585,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev) goto out_phy_disconnect; } - ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev); - if (ret) { - netdev_err(dev, "failed to request TX interrupt\n"); - goto out_free_irq0; + if (!priv->is_lite) { + ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, + dev->name, dev); + if (ret) { + netdev_err(dev, "failed to request TX interrupt\n"); + goto out_free_irq0; + } } /* Initialize both hardware and software ring */ @@ -1635,7 +1800,8 @@ out_free_rx_ring: out_free_tx_ring: for (i = 0; i < dev->num_tx_queues; i++) bcm_sysport_fini_tx_ring(priv, i); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: @@ -1653,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) phy_stop(dev->phydev); /* mask all interrupts */ - intrl2_0_mask_set(priv, 0xffffffff); - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_mask_set(priv, 0xffffffff); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + bcm_sysport_mask_all_intrs(priv); } static int bcm_sysport_stop(struct net_device *dev) @@ -1694,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev) bcm_sysport_fini_rx_ring(priv); free_irq(priv->irq0, dev); - free_irq(priv->irq1, dev); + if (!priv->is_lite) + free_irq(priv->irq1, dev); /* Disconnect from PHY */ phy_disconnect(dev->phydev); @@ -1733,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #define REV_FMT "v%2x.%02x" +static const 
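/*
 * Standalone model of gib_set_pad_extension() above: a read-modify-write
 * of the 6-bit pad-extension field so DSA-tagged frames still meet the
 * minimum Ethernet length once the Broadcom tag is prepended. Shift and
 * mask come from the bcmsysport.h hunk below; ENET_BRCM_TAG_LEN == 4 is an
 * assumption for the model.
 */
#include <assert.h>
#include <stdint.h>

#define GIB_PAD_EXTENSION_SHIFT 22
#define GIB_PAD_EXTENSION_MASK  0x3f
#define ENET_BRCM_TAG_LEN       4	/* assumed tag length */

int main(void)
{
	uint32_t reg = 0xffffffff;	/* arbitrary GIB_CONTROL snapshot */

	reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
	reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;

	assert(((reg >> GIB_PAD_EXTENSION_SHIFT) & GIB_PAD_EXTENSION_MASK)
	       == ENET_BRCM_TAG_LEN);
	return 0;
}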
struct bcm_sysport_hw_params bcm_sysport_params[] = { + [SYSTEMPORT] = { + .is_lite = false, + .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, + }, + [SYSTEMPORT_LITE] = { + .is_lite = true, + .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, + }, +}; + +static const struct of_device_id bcm_sysport_of_match[] = { + { .compatible = "brcm,systemportlite-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, + { .compatible = "brcm,systemport-v1.00", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { .compatible = "brcm,systemport", + .data = &bcm_sysport_params[SYSTEMPORT] }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); + static int bcm_sysport_probe(struct platform_device *pdev) { + const struct bcm_sysport_hw_params *params; + const struct of_device_id *of_id = NULL; struct bcm_sysport_priv *priv; struct device_node *dn; struct net_device *dev; @@ -1745,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) dn = pdev->dev.of_node; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + of_id = of_match_node(bcm_sysport_of_match, dn); + if (!of_id || !of_id->data) + return -EINVAL; + + /* Fairly quickly we need to know the type of adapter we have */ + params = of_id->data; /* Read the Transmit/Receive Queue properties */ if (of_property_read_u32(dn, "systemport,num-txq", &txq)) @@ -1752,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) rxq = 1; + /* Sanity check the number of transmit queues */ + if (!txq || txq > TDMA_NUM_RINGS) + return -EINVAL; + dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); if (!dev) return -ENOMEM; @@ -1759,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* Initialize private members */ priv = netdev_priv(dev); + /* Allocate number of TX rings */ + priv->tx_rings = devm_kcalloc(&pdev->dev, txq, + sizeof(struct bcm_sysport_tx_ring), + GFP_KERNEL); + if (!priv->tx_rings) + return -ENOMEM; + + priv->is_lite = params->is_lite; + priv->num_rx_desc_words = params->num_rx_desc_words; + priv->irq0 = platform_get_irq(pdev, 0); - priv->irq1 = platform_get_irq(pdev, 1); + if (!priv->is_lite) + priv->irq1 = platform_get_irq(pdev, 1); priv->wol_irq = platform_get_irq(pdev, 2); - if (priv->irq0 <= 0 || priv->irq1 <= 0) { + if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { dev_err(&pdev->dev, "invalid interrupts\n"); ret = -EINVAL; goto err_free_netdev; @@ -1836,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, - "Broadcom SYSTEMPORT" REV_FMT + "Broadcom SYSTEMPORT%s" REV_FMT " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + priv->is_lite ? 
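/*
 * Minimal sketch of the probe pattern the hunk above introduces: each
 * compatible string points at per-variant parameters via .data, so the
 * rest of the driver branches on priv->is_lite instead of re-matching
 * strings. The foo_* names are hypothetical; of_match_node() and the
 * of_device_id .data field are the real API in use here.
 */
static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct foo_hw_params *params;

	of_id = of_match_node(foo_of_match, pdev->dev.of_node);
	if (!of_id || !of_id->data)
		return -EINVAL;
	params = of_id->data;	/* variant knobs, fixed at match time */

	/* ...stash params in the private struct before anything uses them */
	return 0;
}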
" Lite" : "", (priv->rev >> 8) & 0xff, priv->rev & 0xff, priv->base, priv->irq0, priv->irq1, txq, rxq); @@ -2033,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d) rbuf_init(priv); /* Set maximum frame length */ - umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + if (!priv->is_lite) + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + else + gib_set_pad_extension(priv); /* Set MAC address */ umac_set_hw_addr(priv, dev->dev_addr); @@ -2069,13 +2282,6 @@ out_free_tx_rings: static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, bcm_sysport_suspend, bcm_sysport_resume); -static const struct of_device_id bcm_sysport_of_match[] = { - { .compatible = "brcm,systemport-v1.00" }, - { .compatible = "brcm,systemport" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); - static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, .remove = bcm_sysport_remove, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 1c82e3da69a7..863ddd7870b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -127,6 +127,10 @@ struct bcm_rsb { #define INTRL2_0_DESC_ALLOC_ERR (1 << 10) #define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11) +/* SYSTEMPORT Lite groups the TX queues interrupts on instance 0 */ +#define INTRL2_0_TDMA_MBDONE_SHIFT 12 +#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT) + /* RXCHK offset and defines */ #define SYS_PORT_RXCHK_OFFSET 0x300 @@ -176,7 +180,9 @@ struct bcm_rsb { #define RBUF_OK_TO_SEND_MASK 0xff #define RBUF_CRC_REPLACE (1 << 20) #define RBUF_OK_TO_SEND_MODE (1 << 21) -#define RBUF_RSB_SWAP (1 << 22) +/* SYSTEMPORT Lite uses two bits here */ +#define RBUF_RSB_SWAP0 (1 << 22) +#define RBUF_RSB_SWAP1 (1 << 23) #define RBUF_ACPI_EN (1 << 23) #define RBUF_PKT_RDY_THRESH 0x04 @@ -247,6 +253,7 @@ struct bcm_rsb { #define MIB_RUNT_CNT_RST (1 << 1) #define MIB_TX_CNT_RST (1 << 2) +/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */ #define UMAC_MPD_CTRL 0x620 #define MPD_EN (1 << 0) #define MSEQ_LEN_SHIFT 16 @@ -258,6 +265,34 @@ struct bcm_rsb { #define UMAC_MDF_CTRL 0x650 #define UMAC_MDF_ADDR 0x654 +/* Only valid on SYSTEMPORT Lite */ +#define SYS_PORT_GIB_OFFSET 0x1000 + +#define GIB_CONTROL 0x00 +#define GIB_TX_EN (1 << 0) +#define GIB_RX_EN (1 << 1) +#define GIB_TX_FLUSH (1 << 2) +#define GIB_RX_FLUSH (1 << 3) +#define GIB_GTX_CLK_SEL_SHIFT 4 +#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) +#define GIB_FCS_STRIP (1 << 6) +#define GIB_LCL_LOOP_EN (1 << 7) +#define GIB_LCL_LOOP_TXEN (1 << 8) +#define GIB_RMT_LOOP_EN (1 << 9) +#define GIB_RMT_LOOP_RXEN (1 << 10) +#define GIB_RX_PAUSE_EN (1 << 11) +#define GIB_PREAMBLE_LEN_SHIFT 12 +#define GIB_PREAMBLE_LEN_MASK 0xf +#define GIB_IPG_LEN_SHIFT 16 +#define GIB_IPG_LEN_MASK 0x3f +#define GIB_PAD_EXTENSION_SHIFT 22 +#define GIB_PAD_EXTENSION_MASK 0x3f + +#define GIB_MAC1 0x08 +#define GIB_MAC0 0x0c + /* Receive DMA offset and defines */ #define SYS_PORT_RDMA_OFFSET 0x2000 @@ -409,16 +444,19 @@ struct bcm_rsb { RING_PCP_DEI_VID) #define TDMA_CONTROL 0x600 -#define TDMA_EN (1 << 0) -#define TSB_EN (1 << 1) -#define TSB_SWAP (1 << 2) -#define ACB_ALGO (1 << 3) +#define TDMA_EN 0 +#define TSB_EN 1 +/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we + * keep the SYSTEMPORT layout here and adjust with 
tdma_control_bit() + */ +#define TSB_SWAP 2 +#define ACB_ALGO 3 #define BUF_DATA_OFFSET_SHIFT 4 #define BUF_DATA_OFFSET_MASK 0x3ff -#define VLAN_EN (1 << 14) -#define SW_BRCM_TAG (1 << 15) -#define WNC_KPT_SIZE_UPDATE (1 << 16) -#define SYNC_PKT_SIZE (1 << 17) +#define VLAN_EN 14 +#define SW_BRCM_TAG 15 +#define WNC_KPT_SIZE_UPDATE 16 +#define SYNC_PKT_SIZE 17 #define ACH_TXDONE_DELAY_SHIFT 18 #define ACH_TXDONE_DELAY_MASK 0xff @@ -475,12 +513,12 @@ struct dma_desc { }; /* Number of Receive hardware descriptor words */ -#define NUM_HW_RX_DESC_WORDS 1024 -/* Real number of usable descriptors */ -#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) +#define SP_NUM_HW_RX_DESC_WORDS 1024 +#define SP_LT_NUM_HW_RX_DESC_WORDS 256 -/* Internal linked-list RAM has up to 1536 entries */ -#define NUM_TX_DESC 1536 +/* Internal linked-list RAM size */ +#define SP_NUM_TX_DESC 1536 +#define SP_LT_NUM_TX_DESC 256 #define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) @@ -627,6 +665,16 @@ struct bcm_sysport_cb { DEFINE_DMA_UNMAP_LEN(dma_len); }; +enum bcm_sysport_type { + SYSTEMPORT = 0, + SYSTEMPORT_LITE, +}; + +struct bcm_sysport_hw_params { + bool is_lite; + unsigned int num_rx_desc_words; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -651,6 +699,8 @@ struct bcm_sysport_priv { u32 irq0_mask; u32 irq1_stat; u32 irq1_mask; + bool is_lite; + unsigned int num_rx_desc_words; struct napi_struct napi ____cacheline_aligned; struct net_device *netdev; struct platform_device *pdev; @@ -659,7 +709,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ - struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ void __iomem *rx_bds; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 7c19c8e2bf91..6ce80cbcb48e 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -12,11 +12,6 @@ #include <linux/brcmphy.h> #include "bgmac.h" -struct bcma_mdio { - struct bcma_device *core; - u8 phyaddr; -}; - static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, * PHY ops **************************************************/ -static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) { struct bcma_device *core; u16 phy_access_addr; @@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, +static int 
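/*
 * Quick arithmetic behind the descriptor-count changes above: the RX ring
 * size is now derived from the per-variant word count rather than a single
 * NUM_RX_DESC constant, assuming the two-word struct dma_desc from this
 * header.
 */
#include <assert.h>

#define SP_NUM_HW_RX_DESC_WORDS    1024
#define SP_LT_NUM_HW_RX_DESC_WORDS 256
#define WORDS_PER_DESC 2	/* sizeof(struct dma_desc) / sizeof(u32) */

int main(void)
{
	assert(SP_NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC == 512);    /* classic */
	assert(SP_LT_NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC == 128); /* Lite */
	return 0;
}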
bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) { struct bcma_device *core; @@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, u16 phy_ctl_addr; u32 tmp; - if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bcma_mdio->core->bus->drv_gmac_cmn.core; + if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->bcma.core->bus->drv_gmac_cmn.core; phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; } else { - core = bcma_mdio->core; + core = bgmac->bcma.core; phy_access_addr = BGMAC_PHY_ACCESS; phy_ctl_addr = BGMAC_PHY_CNTL; } @@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, tmp |= phyaddr; bcma_write32(core, phy_ctl_addr, tmp); - bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) dev_warn(&core->dev, "Error setting MDIO int\n"); tmp = BGMAC_PA_START; @@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +static void bcma_mdio_phy_init(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo; u8 i; + /* For some legacy hardware we do chipset-based PHY initialization here + * without even detecting PHY ID. It's hacky and should be cleaned as + * soon as someone can test it. + */ if (ci->id == BCMA_CHIP_ID_BCM5356) { for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); - bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); for (i = 0; i < 5; i++) { - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); - bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); - bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); - bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f); + 
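/*
 * Minimal sketch of the simplification above: with the bgmac itself passed
 * as bus->priv there is no wrapper struct left to allocate or free, and
 * registration boils down to the usual mdiobus sequence. The mydev_* names
 * are hypothetical; the mdiobus_* calls are the real API.
 */
static struct mii_bus *mydev_mdio_register(struct mydev *md)
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->name = "mydev mii bus";
	snprintf(bus->id, MII_BUS_ID_SIZE, "mydev-%d", md->unit);
	bus->priv = md;			/* one pointer, no wrapper kzalloc */
	bus->read = mydev_mdio_read;	/* assumed callbacks */
	bus->write = mydev_mdio_write;
	bus->phy_mask = ~(1 << md->phyaddr);

	err = mdiobus_register(bus);
	if (err) {
		mdiobus_free(bus);
		return ERR_PTR(err);
	}
	return bus;
}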
bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073); + bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273); + bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b); } + return; } + + /* For all other hw do initialization using PHY subsystem. */ + if (bgmac->net_dev && bgmac->net_dev->phydev) + phy_init_hw(bgmac->net_dev->phydev); } /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ static int bcma_mdio_phy_reset(struct mii_bus *bus) { - struct bcma_mdio *bcma_mdio = bus->priv; - u8 phyaddr = bcma_mdio->phyaddr; + struct bgmac *bgmac = bus->priv; + u8 phyaddr = bgmac->phyaddr; - if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + if (phyaddr == BGMAC_PHY_NOREGS) return 0; - bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET); udelay(100); - if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) - dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); - bcma_mdio_phy_init(bcma_mdio); + if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(bgmac->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bgmac); return 0; } @@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum, return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value); } -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) { - struct bcma_mdio *bcma_mdio; + struct bcma_device *core = bgmac->bcma.core; struct mii_bus *mii_bus; int err; - bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL); - if (!bcma_mdio) - return ERR_PTR(-ENOMEM); - mii_bus = mdiobus_alloc(); if (!mii_bus) { err = -ENOMEM; @@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) mii_bus->name = "bcma_mdio mii bus"; sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num, core->core_unit); - mii_bus->priv = bcma_mdio; + mii_bus->priv = bgmac; mii_bus->read = bcma_mdio_mii_read; mii_bus->write = bcma_mdio_mii_write; mii_bus->reset = bcma_mdio_phy_reset; mii_bus->parent = &core->dev; - mii_bus->phy_mask = ~(1 << phyaddr); - - bcma_mdio->core = core; - bcma_mdio->phyaddr = phyaddr; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); err = mdiobus_register(mii_bus); if (err) { @@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) err_free_bus: mdiobus_free(mii_bus); err: - kfree(bcma_mdio); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_register); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus) { - struct bcma_mdio *bcma_mdio; - if (!mii_bus) return; - bcma_mdio = mii_bus->priv; - mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); - kfree(bcma_mdio); } EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 4a4ffc0c4c65..5ef60d4f12b4 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core) u8 *mac; int err; - bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&core->dev); if (!bgmac) return -ENOMEM; bgmac->bcma.core = core; - bgmac->dev = &core->dev; bgmac->dma_dev = core->dma_dev; bgmac->irq = core->irq; @@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core) if 
(!bgmac_is_bcm4707_family(core) && !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { - mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); + mii_bus = bcma_mdio_mii_register(bgmac); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); goto err; @@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core) err1: bcma_mdio_mii_unregister(bgmac->mii_bus); err: - kfree(bgmac); bcma_set_drvdata(core, NULL); return err; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6f736c19872f..805e6ed6c390 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev) struct resource *regs; const u8 *mac_addr; - bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + bgmac = bgmac_alloc(&pdev->dev); if (!bgmac) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 0e066dc6b8cc..fe88126b1e0c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1148,7 +1148,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) return weight; if (handled < weight) { - napi_complete(napi); + napi_complete_done(napi, handled); bgmac_chip_intrs_on(bgmac); } @@ -1446,22 +1446,32 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); -int bgmac_enet_probe(struct bgmac *info) +struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; - int err; /* Allocation and references */ - net_dev = alloc_etherdev(sizeof(*bgmac)); + net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) - return -ENOMEM; + return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; + bgmac = netdev_priv(net_dev); - memcpy(bgmac, info, sizeof(*bgmac)); + bgmac->dev = dev; bgmac->net_dev = net_dev; + + return bgmac; +} +EXPORT_SYMBOL_GPL(bgmac_alloc); + +int bgmac_enet_probe(struct bgmac *bgmac) +{ + struct net_device *net_dev = bgmac->net_dev; + int err; + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); @@ -1488,7 +1498,7 @@ int bgmac_enet_probe(struct bgmac *info) err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); - goto err_netdev_free; + goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; @@ -1521,8 +1531,7 @@ err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); -err_netdev_free: - free_netdev(net_dev); +err_out: return err; } diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 71f493f2451f..ab2db76e4fb8 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -517,12 +517,13 @@ struct bgmac { int (*phy_connect)(struct bgmac *bgmac); }; -int bgmac_enet_probe(struct bgmac *info); +struct bgmac *bgmac_alloc(struct device *dev); +int bgmac_enet_probe(struct bgmac *bgmac); void bgmac_enet_remove(struct bgmac *bgmac); void bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); -struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr); +struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) diff --git 
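/*
 * Minimal sketch of the alloc/probe split visible above: bgmac_alloc()
 * uses devm_alloc_etherdev(), so the net_device (with the private struct
 * in its tail) is tied to the parent device's lifetime and the manual
 * free_netdev()/kfree() error paths disappear from both bus front-ends.
 * The mydrv_* names are hypothetical.
 */
static struct mydrv *mydrv_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct mydrv *priv;

	net_dev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!net_dev)
		return NULL;

	priv = netdev_priv(net_dev);	/* private area trails net_device */
	priv->dev = dev;
	priv->net_dev = net_dev;
	return priv;			/* freed automatically on detach */
}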
a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d5d1026be4b7..e3af1f3cb61f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | @@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp) (unsigned long) (bp->stats_blk->ctr + \ bp->temp_stats_blk->ctr) -static struct rtnl_link_stats64 * +static void bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); if (bp->stats_blk == NULL) - return net_stats; + return; net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + @@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + GET_32BIT_NET_STATS(stat_FwRxDrop); - return net_stats; } /* All ethtool functions called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3e199d3e461e..9e8c06130c09 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - - /* put page reference used by the memory pool, since we - * won't be using this page as the mempool anymore. - */ - if (pool->page) - put_page(pool->page); - + if (!pool->page) { pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); if (unlikely(!pool->page)) return -ENOMEM; @@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, return -ENOMEM; } - get_page(pool->page); sw_buf->page = pool->page; sw_buf->offset = pool->offset; @@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, sge->addr_lo = cpu_to_le32(U64_LO(mapping)); pool->offset += SGE_PAGE_SIZE; - + if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) + get_page(pool->page); + else + pool->page = NULL; return 0; } @@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) * has been updated when NAPI was scheduled. 
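/*
 * Minimal sketch of the .ndo_get_stats64 signature change applied to b44
 * and bnx2 in this diff: the core now supplies the buffer and ignores any
 * return value, so handlers fill in what they have and return void. The
 * mydrv_* names are hypothetical.
 */
static void mydrv_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (!priv->hw_stats)		/* nothing to report yet */
		return;

	stats->rx_packets = priv->hw_stats->rx_packets;
	stats->tx_packets = priv->hw_stats->tx_packets;
}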
*/ if (IS_FCOE_FP(fp)) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); } else { bnx2x_update_fpsb_idx(fp); /* bnx2x_has_rx_work() reads the status block, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 5f19427c7b27..43423744fdfa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp) return port_type; } -static int bnx2x_get_vf_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int bnx2x_get_vf_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (bp->state == BNX2X_STATE_OPEN) { if (test_bit(BNX2X_LINK_REPORT_FD, &bp->vf_link_vars.link_report_flags)) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; - ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); + cmd->base.speed = bp->vf_link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = PORT_OTHER; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); u32 media_type; + u32 supported, advertising, lp_advertising; + + ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + cmd->link_modes.lp_advertising); /* Dual Media boards present all available port types */ - cmd->supported = bp->port.supported[cfg_idx] | + supported = bp->port.supported[cfg_idx] | (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); - cmd->advertising = bp->port.advertising[cfg_idx]; + advertising = bp->port.advertising[cfg_idx]; media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; if (media_type == ETH_PHY_SFP_1G_FIBER) { - cmd->supported &= ~(SUPPORTED_10000baseT_Full); - cmd->advertising &= ~(ADVERTISED_10000baseT_Full); + supported &= ~(SUPPORTED_10000baseT_Full); + advertising &= ~(ADVERTISED_10000baseT_Full); } if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && !(bp->flags & MF_FUNC_DIS)) { - cmd->duplex = bp->link_vars.duplex; + cmd->base.duplex = bp->link_vars.duplex; if 
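/*
 * Minimal get-side sketch of the ethtool_cmd -> ethtool_link_ksettings
 * conversion performed on bnx2x above: the driver keeps its legacy u32
 * SUPPORTED_xxx/ADVERTISED_xxx masks internally and translates at the
 * boundary with the conversion helpers shown in the hunks. The mydrv_*
 * names are hypothetical.
 */
static int mydrv_get_link_ksettings(struct net_device *dev,
				    struct ethtool_link_ksettings *cmd)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	cmd->base.speed = priv->line_speed;
	cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* legacy u32 bitmaps -> long link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						priv->supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						priv->advertising);
	return 0;
}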
(IS_MF(bp) && !BP_NOMCP(bp)) - ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + cmd->base.speed = bnx2x_get_mf_speed(bp); else - ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + cmd->base.speed = bp->link_vars.line_speed; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } - cmd->port = bnx2x_get_port_type(bp); + cmd->base.port = bnx2x_get_port_type(bp); - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.phy_address = bp->mdio.prtad; if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; /* Publish LP advertised speeds and FC */ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { u32 status = bp->link_vars.link_status; - cmd->lp_advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Pause; + lp_advertising |= ADVERTISED_Pause; if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) - cmd->lp_advertising |= ADVERTISED_Asym_Pause; + lp_advertising |= ADVERTISED_Asym_Pause; if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Half; + lp_advertising |= ADVERTISED_10baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10baseT_Full; + lp_advertising |= ADVERTISED_10baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Half; + lp_advertising |= ADVERTISED_100baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_100baseT_Full; + lp_advertising |= ADVERTISED_100baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + lp_advertising |= ADVERTISED_1000baseT_Half; if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseKX_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_1000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + lp_advertising |= ADVERTISED_2500baseX_Full; if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { if (media_type == ETH_PHY_KR) { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseKR_Full; } else { - cmd->lp_advertising |= + lp_advertising |= ADVERTISED_10000baseT_Full; } } if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + lp_advertising |= ADVERTISED_20000baseKR2_Full; } - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, 
cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); return 0; } -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnx2x_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; u32 speed, phy_idx; + u32 supported; + u8 duplex = cmd->base.duplex; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (IS_MF_SD(bp)) return 0; DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + " duplex %d port %d phy_address %d\n" + " autoneg %d\n", + cmd->base.cmd, supported, advertising, + cmd->base.speed, + cmd->base.duplex, cmd->base.port, cmd->base.phy_address, + cmd->base.autoneg); - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; + if (duplex == DUPLEX_UNKNOWN) + duplex = DUPLEX_FULL; if (IS_MF_SI(bp)) { u32 part; @@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); old_multi_phy_config = bp->link_params.multi_phy_config; - if (cmd->port != bnx2x_get_port_type(bp)) { - switch (cmd->port) { + if (cmd->base.port != bnx2x_get_port_type(bp)) { + switch (cmd->base.port) { case PORT_TP: if (!(bp->port.supported[0] & SUPPORTED_TP || bp->port.supported[1] & SUPPORTED_TP)) { @@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->link_params.multi_phy_config = old_multi_phy_config; DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; if (bp->link_params.phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) @@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } /* advertise the requested speed and duplex if supported */ - if (cmd->advertising & ~an_supported_speed) { + if (advertising & ~an_supported_speed) { DP(BNX2X_MSG_ETHTOOL, "Advertisement parameters are not supported\n"); return -EINVAL; } bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | - cmd->advertising); - if (cmd->advertising) { + advertising); + if (advertising) { bp->link_params.speed_cap_mask[cfg_idx] = 0; - if (cmd->advertising & ADVERTISED_10baseT_Half) { + if (advertising & ADVERTISED_10baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; } - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; - if (cmd->advertising & ADVERTISED_100baseT_Half) { + if (advertising & ADVERTISED_100baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; } - if (cmd->advertising & ADVERTISED_1000baseT_Half) { + if (advertising & ADVERTISED_1000baseT_Half) { bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; } - if (cmd->advertising & (ADVERTISED_1000baseT_Full | + if (advertising & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseKX_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; - if (cmd->advertising & (ADVERTISED_10000baseT_Full | + if (advertising & (ADVERTISED_10000baseT_Full | ADVERTISED_10000baseKX4_Full | ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; - if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + if (advertising & ADVERTISED_20000baseKR2_Full) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } @@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* advertise the requested speed and duplex if supported */ switch (speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { + if (duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, @@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "1G half not supported\n"); return -EINVAL; @@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "2.5G half not supported\n"); return -EINVAL; @@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { + if (duplex != DUPLEX_FULL) { DP(BNX2X_MSG_ETHTOOL, "10G half not supported\n"); return -EINVAL; @@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } bp->link_params.req_line_speed[cfg_idx] = speed; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->link_params.req_duplex[cfg_idx] = duplex; bp->port.advertising[cfg_idx] = advertising; } @@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev, } static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_regs_len = bnx2x_get_regs_len, .get_regs = bnx2x_get_regs, @@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, .get_ts_info = bnx2x_get_ts_info, + .get_link_ksettings = bnx2x_get_link_ksettings, + .set_link_ksettings = bnx2x_set_link_ksettings, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops 
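/*
 * The corresponding ethtool_ops wiring, as done for both the PF and VF
 * tables below: the legacy .get_settings/.set_settings entries go away and
 * the ksettings pair takes over; the ethtool core only falls back to the
 * legacy hooks when the new ones are absent, so a driver provides one or
 * the other, not both. A minimal sketch with assumed mydrv_* handlers:
 */
static const struct ethtool_ops mydrv_ethtool_ops = {
	.get_drvinfo		= mydrv_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= mydrv_get_link_ksettings,
	.set_link_ksettings	= mydrv_set_link_ksettings,
};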
= { - .get_settings = bnx2x_get_vf_settings, .get_drvinfo = bnx2x_get_drvinfo, .get_msglevel = bnx2x_get_msglevel, .set_msglevel = bnx2x_set_msglevel, @@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .set_rxfh = bnx2x_set_rxfh, .get_channels = bnx2x_get_channels, .set_channels = bnx2x_set_channels, + .get_link_ksettings = bnx2x_get_vf_link_ksettings, }; void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4fcc6a84a087..aff3dc114a5b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -39,9 +39,6 @@ #include <net/checksum.h> #include <net/ip6_checksum.h> #include <net/udp_tunnel.h> -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#endif #include <linux/workqueue.h> #include <linux/prefetch.h> #include <linux/cache.h> @@ -1130,7 +1127,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, dev_kfree_skb_any(skb); return NULL; } - tcp_gro_complete(skb); if (nw_off) { /* tunnel */ struct udphdr *uh = NULL; @@ -1180,6 +1176,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); + if (likely(skb)) + tcp_gro_complete(skb); #endif return skb; } @@ -1356,11 +1354,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; } goto next_rx_no_prod; @@ -1460,11 +1454,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } skb_record_rx_queue(skb, bnapi->index); - skb_mark_napi_id(skb, &bnapi->napi); - if (bnxt_busy_polling(bnapi)) - netif_receive_skb(skb); - else - napi_gro_receive(&bnapi->napi, skb); + napi_gro_receive(&bnapi->napi, skb); rc = 1; next_rx: @@ -1769,7 +1759,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) } if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_pkts); BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); } return rx_pkts; @@ -1782,9 +1772,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int work_done = 0; - if (!bnxt_lock_napi(bnapi)) - return budget; - while (1) { work_done += bnxt_poll_work(bp, bnapi, budget - work_done); @@ -1792,42 +1779,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; if (!bnxt_has_work(bp, cpr)) { - napi_complete(napi); - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + if (napi_complete_done(napi, work_done)) + BNXT_CP_DB_REARM(cpr->cp_doorbell, + cpr->cp_raw_cons); break; } } mmiowb(); - bnxt_unlock_napi(bnapi); return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int bnxt_busy_poll(struct napi_struct *napi) -{ - struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); - struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - int rx_work, budget = 4; - - if (atomic_read(&bp->intr_sem) != 0) - return LL_FLUSH_FAILED; - - if (!bp->link_info.link_up) - return LL_FLUSH_FAILED; - - if (!bnxt_lock_poll(bnapi)) - return LL_FLUSH_BUSY; - - 
rx_work = bnxt_poll_work(bp, bnapi, budget); - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - - bnxt_unlock_poll(bnapi); - return rx_work; -} -#endif - static void bnxt_free_tx_skbs(struct bnxt *bp) { int i, max_idx; @@ -2506,6 +2467,8 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) static void bnxt_set_tpa_flags(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_TPA; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; if (bp->dev->features & NETIF_F_GRO) @@ -2535,7 +2498,7 @@ void bnxt_set_ring_params(struct bnxt *bp) agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; - if (rx_space > PAGE_SIZE) { + if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { u32 jumbo_factor; bp->flags |= BNXT_FLAG_JUMBO; @@ -2669,6 +2632,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) goto out; } + if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && + !(vnic->flags & BNXT_VNIC_RSS_FLAG)) + continue; + /* Allocate rss table and hash key */ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &vnic->rss_table_dma_addr, @@ -2993,6 +2960,45 @@ alloc_mem_err: return rc; } +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } +} + void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, u16 cmpl_ring, u16 target_id) { @@ -3312,10 +3318,26 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; req.ip_protocol = keys->basic.ip_proto; - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + if (keys->basic.n_proto == htons(ETH_P_IPV6)) { + int i; + + req.ethertype = htons(ETH_P_IPV6); + req.ip_addr_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + *(struct in6_addr *)&req.src_ipaddr[0] = + keys->addrs.v6addrs.src; + *(struct in6_addr *)&req.dst_ipaddr[0] = + keys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + } + } else { + req.src_ipaddr[0] = keys->addrs.v4addrs.src; + req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + } req.src_port = keys->ports.src; req.src_port_mask = cpu_to_be16(0xffff); @@ -3562,6 +3584,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); + } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { + req.rss_rule = + 
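/*
 * Minimal sketch of the quiesce pattern bnxt_disable_int_sync() adds
 * above: flag interrupts as unwanted, mask them, then synchronize_irq()
 * per vector so no handler can still be running when the completion rings
 * are freed. The mydrv_* names are hypothetical.
 */
static void mydrv_disable_int_sync(struct mydrv *md)
{
	int i;

	atomic_inc(&md->intr_sem);	/* handlers test this and bail out */
	mydrv_mask_all_irqs(md);	/* assumed helper */

	for (i = 0; i < md->nvecs; i++)
		synchronize_irq(md->irq_tbl[i].vector);
}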
cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { req.rss_rule = cpu_to_le16(0xffff); } @@ -3665,6 +3693,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, return rc; } +static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) +{ + struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_vnic_qcaps_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10600) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + if (resp->flags & + cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + bp->flags |= BNXT_FLAG_NEW_RSS_CAP; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { u16 i; @@ -3811,6 +3860,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return rc; } +static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) +{ + int rc; + + if (BNXT_PF(bp)) { + struct hwrm_func_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } else { + struct hwrm_func_vf_cfg_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = + cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = cpu_to_le16(idx); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + } + return rc; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { int i, rc = 0; @@ -3827,6 +3900,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) goto err_out; BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + + if (!i) { + rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); + if (rc) + netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); + } } for (i = 0; i < bp->tx_nr_rings; i++) { @@ -3977,6 +4056,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } + /* The completion rings are about to be freed. After that the + * IRQ doorbell will not work anymore. So we need to disable + * IRQ here. 
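/*
 * The firmware capability probe added above (bnxt_hwrm_vnic_qcaps())
 * follows the driver's standard HWRM shape: skip on firmware older than
 * the command, keep the shared response buffer stable under hwrm_cmd_lock,
 * and cache the answer as a bp->flags bit so fast paths never re-query.
 * A condensed sketch using the same bnxt-internal helpers the hunks show;
 * the function name is a hypothetical stand-in:
 */
static int bnxt_query_caps_sketch(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)	/* command needs newer fw */
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);		/* resp buffer is shared */
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)))
		bp->flags |= BNXT_FLAG_NEW_RSS_CAP;	/* cache the answer */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}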
+ */ + bnxt_disable_int_sync(bp); + for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -3992,6 +4077,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +/* Caller must hold bp->hwrm_cmd_lock */ +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(fid); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + + return rc; +} + +int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + if (BNXT_VF(bp)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); + req.num_tx_rings = cpu_to_le16(*tx_rings); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, u32 buf_tmrs, u16 flags, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) @@ -4463,8 +4592,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; + if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) + goto skip_rss_ctx; + /* allocate context for vnic */ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); if (rc) { @@ -4484,6 +4617,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) bp->rsscos_nr_ctxs++; } +skip_rss_ctx: /* configure default vnic, ring grp */ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); if (rc) { @@ -4518,13 +4652,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) int i, rc = 0; for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i; if (vnic_id >= bp->nr_vnics) break; - bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; + vnic = &bp->vnic_info[vnic_id]; + vnic->flags |= BNXT_VNIC_RFS_FLAG; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", @@ -4698,34 +4836,6 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) return bnxt_init_chip(bp, irq_re_init); } -static void bnxt_disable_int(struct bnxt *bp) -{ - int i; - - if (!bp->bnapi) - return; - - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - -static void bnxt_enable_int(struct bnxt *bp) -{ - int i; - - atomic_set(&bp->intr_sem, 0); - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_napi *bnapi = bp->bnapi[i]; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - - BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); - } -} - static int bnxt_set_real_num_queues(struct bnxt *bp) { int rc; @@ -4836,6 +4946,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp) 
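
The hunks above add a reserve-then-confirm flow for TX rings: bnxt_hwrm_reserve_tx_rings() requests num_tx_rings through HWRM_FUNC_CFG, then __bnxt_hwrm_get_tx_rings() reads back alloc_tx_rings through HWRM_FUNC_QCFG, so the driver learns how many rings firmware actually granted. Below is a minimal sketch of a caller in the style of bnxt_setup_mq_tc() and bnxt_set_channels() later in this patch; the function name and "want" are illustrative, not code from the commit:

static int example_reserve_tx_rings(struct bnxt *bp, int want)
{
	int granted = want;

	/* On success "granted" holds the firmware's alloc_tx_rings. */
	if (bnxt_hwrm_reserve_tx_rings(bp, &granted) || granted < want)
		return -ENOMEM;	/* a short grant is treated as failure */
	return 0;
}
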
return rc; } +#ifdef CONFIG_RFS_ACCEL +static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_rsscos_ctxs; +#endif + return bp->pf.max_rsscos_ctxs; +} + +static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + if (BNXT_VF(bp)) + return bp->vf.max_vnics; +#endif + return bp->pf.max_vnics; +} +#endif + unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) @@ -5094,10 +5224,8 @@ static void bnxt_disable_napi(struct bnxt *bp) if (!bp->bnapi) return; - for (i = 0; i < bp->cp_nr_rings; i++) { + for (i = 0; i < bp->cp_nr_rings; i++) napi_disable(&bp->bnapi[i]->napi); - bnxt_disable_poll(bp->bnapi[i]); - } } static void bnxt_enable_napi(struct bnxt *bp) @@ -5106,7 +5234,6 @@ static void bnxt_enable_napi(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { bp->bnapi[i]->in_reset = false; - bnxt_enable_poll(bp->bnapi[i]); napi_enable(&bp->bnapi[i]->napi); } } @@ -5384,7 +5511,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, { u8 autoneg = bp->link_info.autoneg; u16 fw_link_speed = bp->link_info.req_link_speed; - u32 advertising = bp->link_info.advertising; + u16 advertising = bp->link_info.advertising; if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= @@ -5489,6 +5616,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -5678,19 +5844,6 @@ static int bnxt_open(struct net_device *dev) return __bnxt_open_nic(bp, true, true); } -static void bnxt_disable_int_sync(struct bnxt *bp) -{ - int i; - - atomic_inc(&bp->intr_sem); - if (!netif_running(bp->dev)) - return; - - bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); -} - int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -5712,13 +5865,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) msleep(20); - /* Flush rings before disabling interrupts */ + /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ bnxt_disable_napi(bp); - bnxt_disable_int_sync(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); @@ -5765,16 +5917,14 @@ static int bnxt_ioctl(struct net_device 
*dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static struct rtnl_link_stats64 * +static void bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { u32 i; struct bnxt *bp = netdev_priv(dev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - if (!bp->bnapi) - return stats; + return; /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { @@ -5821,8 +5971,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); stats->tx_errors = le64_to_cpu(tx->tx_err); } - - return stats; } static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) @@ -5975,20 +6123,36 @@ skip_uc: return rc; } +/* If the chip and firmware supports RFS */ +static bool bnxt_rfs_supported(struct bnxt *bp) +{ + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) + return true; + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + return true; + return false; +} + +/* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { #ifdef CONFIG_RFS_ACCEL - struct bnxt_pf_info *pf = &bp->pf; - int vnics; + int vnics, max_vnics, max_rss_ctxs; if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) return false; vnics = 1 + bp->rx_nr_rings; - if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { + max_vnics = bnxt_get_max_func_vnics(bp); + max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); + + /* RSS contexts not a limiting factor */ + if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) + max_rss_ctxs = max_vnics; + if (vnics > max_vnics || vnics > max_rss_ctxs) { netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", - min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); + min(max_rss_ctxs - 1, max_vnics - 1)); return false; } @@ -6044,6 +6208,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + flags &= ~BNXT_FLAG_TPA; + if (features & NETIF_F_HW_VLAN_CTAG_RX) flags |= BNXT_FLAG_STRIP_VLAN; @@ -6472,10 +6639,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) sh = true; if (tc) { - int max_rx_rings, max_tx_rings, rc; + int max_rx_rings, max_tx_rings, req_tx_rings, rsv_tx_rings, rc; + req_tx_rings = bp->tx_nr_rings_per_tc * tc; rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); - if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) + if (rc || req_tx_rings > max_tx_rings) + return -ENOMEM; + + rsv_tx_rings = req_tx_rings; + if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings) || + rsv_tx_rings < req_tx_rings) return -ENOMEM; } @@ -6567,12 +6740,18 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, goto err_free; } - if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || + if ((fkeys->basic.n_proto != htons(ETH_P_IP) && + fkeys->basic.n_proto != htons(ETH_P_IPV6)) || ((fkeys->basic.ip_proto != IPPROTO_TCP) && (fkeys->basic.ip_proto != IPPROTO_UDP))) { rc = -EPROTONOSUPPORT; goto err_free; } + if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && + bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); @@ -6779,9 +6958,6 @@ static const struct net_device_ops bnxt_netdev_ops = { #endif .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll 
= bnxt_busy_poll, -#endif }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -6920,8 +7096,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, int rc; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) - return rc; + if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { + /* Not enough rings, try disabling agg rings. */ + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); + if (rc) + return rc; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bnxt_set_ring_params(bp); + } if (bp->flags & BNXT_FLAG_ROCE_CAP) { int max_cp, max_stat, max_irq; @@ -6960,6 +7145,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) return rc; bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + + rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); + if (rc) + netdev_warn(bp->dev, "Unable to reserve tx rings\n"); + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; @@ -7107,11 +7297,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_port_led_qcaps(bp); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); bnxt_set_max_func_irqs(bp, max_irqs); - bnxt_set_dflt_rings(bp); + rc = bnxt_set_dflt_rings(bp); + if (rc) { + netdev_err(bp->dev, "Not enough rings available.\n"); + rc = -ENOMEM; + goto init_err; + } /* Default RSS hash cfg. */ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | @@ -7126,7 +7322,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; } - if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnxt_hwrm_vnic_qcaps(bp); + if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16defe9ececc..52a1cc061ba3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -654,21 +654,9 @@ struct bnxt_napi { struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t poll_state; -#endif bool in_reset; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum bnxt_poll_state_t { - BNXT_STATE_IDLE = 0, - BNXT_STATE_NAPI, - BNXT_STATE_POLL, - BNXT_STATE_DISABLE, -}; -#endif - struct bnxt_irq { irq_handler_t handler; unsigned int vector; @@ -720,6 +708,7 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_FLAG 2 #define BNXT_VNIC_MCAST_FLAG 4 #define BNXT_VNIC_UCAST_FLAG 8 +#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 }; #if defined(CONFIG_BNXT_SRIOV) @@ -840,7 +829,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB #define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB u16 support_speeds; - u16 auto_link_speeds; + u16 auto_link_speeds; /* fw adv setting */ #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB @@ -863,7 +852,7 @@ struct bnxt_link_info { u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; - u32 advertising; + u16 advertising; /* user adv setting */ bool force_link_chng; /* a copy of phy_qcfg output 
used to report link @@ -879,6 +868,20 @@ struct bnxt_queue_info { u8 queue_profile; }; +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + u8 led_id; + u8 led_type; + u8 led_group_id; + u8 unused; + __le16 led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED)) + + __le16 led_color_caps; +}; + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 @@ -956,10 +959,12 @@ struct bnxt { #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_UDP_RSS_CAP 0x800 #define BNXT_FLAG_EEE_CAP 0x1000 + #define BNXT_FLAG_NEW_RSS_CAP 0x2000 #define BNXT_FLAG_ROCEV1_CAP 0x8000 #define BNXT_FLAG_ROCEV2_CAP 0x10000 #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ BNXT_FLAG_ROCEV2_CAP) + #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1132,6 +1137,9 @@ struct bnxt { struct ethtool_eee eee; u32 lpi_tmr_lo; u32 lpi_tmr_hi; + + u8 num_leds; + struct bnxt_led_info leds[BNXT_MAX_LED]; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1141,93 +1149,6 @@ struct bnxt { ((offsetof(struct tx_port_stats, counter) + \ sizeof(struct rx_port_stats) + 512) / 8) -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the NAPI poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_NAPI); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -/* called from the busy poll routine to get ownership of a bnapi */ -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_POLL); - - return rc == BNXT_STATE_IDLE; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ - atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ - int old; - - while (1) { - old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, - BNXT_STATE_DISABLE); - if (old == BNXT_STATE_IDLE) - break; - usleep_range(500, 5000); - } -} - -#else - -static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) -{ - return true; -} - -static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) -{ -} - -static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) -{ - return false; -} - -static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) -{ -} - -#endif - #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e @@ -1246,6 +1167,8 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, int bmap_size); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); +int 
bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings); int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 784aa77610bc..24818e1e59f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -388,6 +388,7 @@ static int bnxt_set_channels(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); int max_rx_rings, max_tx_rings, tcs; + int req_tx_rings, rsv_tx_rings; u32 rc = 0; bool sh = false; @@ -423,6 +424,20 @@ static int bnxt_set_channels(struct net_device *dev, channel->tx_count > max_tx_rings)) return -ENOMEM; + req_tx_rings = sh ? channel->combined_count : channel->tx_count; + req_tx_rings = min_t(int, req_tx_rings, max_tx_rings); + if (tcs > 1) + req_tx_rings *= tcs; + + rsv_tx_rings = req_tx_rings; + if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings)) + return -ENOMEM; + + if (rsv_tx_rings < req_tx_rings) { + netdev_warn(dev, "Unable to allocate the requested tx rings\n"); + return -ENOMEM; + } + if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -524,24 +539,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fltr_found: fkeys = &fltr->fkeys; - if (fkeys->basic.ip_proto == IPPROTO_TCP) - fs->flow_type = TCP_V4_FLOW; - else if (fkeys->basic.ip_proto == IPPROTO_UDP) - fs->flow_type = UDP_V4_FLOW; - else - goto fltr_err; + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); - fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; - fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); - fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; - fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); - fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + } else { + int i; - fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + if (fkeys->basic.ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V6_FLOW; + else if (fkeys->basic.ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V6_FLOW; + else + goto fltr_err; + + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + for (i = 0; i < 4; i++) { + fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); + fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); + } + fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); + + fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + } fs->ring_cookie = fltr->rxq; rc = 0; @@ -893,7 +933,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info 
*link_info, struct ethtool_link_ksettings *lk_ksettings) { - u16 fw_speeds = link_info->auto_link_speeds; + u16 fw_speeds = link_info->advertising; u8 fw_pause = 0; if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) @@ -1090,8 +1130,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; - u32 speed, fw_advertising = 0; bool set_pause = false; + u16 fw_advertising = 0; + u32 speed; int rc = 0; if (!BNXT_SINGLE_PF(bp)) @@ -2039,6 +2080,47 @@ static int bnxt_nway_reset(struct net_device *dev) return rc; } +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + u8 led_state; + __le16 duration; + int i, rc; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (state == ETHTOOL_ID_ACTIVE) { + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = cpu_to_le16(500); + } else if (state == ETHTOOL_ID_INACTIVE) { + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = cpu_to_le16(0); + } else { + return -EINVAL; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req.enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, @@ -2070,5 +2152,6 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_eee = bnxt_set_eee, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, - .nway_reset = bnxt_nway_reset + .nway_reset = bnxt_nway_reset, + .set_phys_id = bnxt_set_phys_id, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3abc03b60dbc..ed1e555292e9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -10,6 +10,29 @@ #ifndef BNXT_ETHTOOL_H #define BNXT_ETHTOOL_H +struct bnxt_led_cfg { + u8 led_id; + u8 led_state; + u8 led_color; + u8 unused; + __le16 led_blink_on; + __le16 led_blink_off; + u8 led_group_id; + u8 rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ + PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \ + PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 2ddfa51519a1..5df32ab64f0c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* 
Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,12 +11,12 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.6.0 */ +/* HSI and HWRM Specification 1.6.1 */ #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 6 -#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_STR "1.6.0" +#define HWRM_VERSION_STR "1.6.1" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. @@ -549,6 +549,8 @@ struct hwrm_ver_get_output { __le32 dev_caps_cfg; #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL u8 roce_fw_maj; u8 roce_fw_min; u8 roce_fw_bld; @@ -1919,6 +1921,219 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_port_led_cfg */ +/* Input (64 bytes) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define 
PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* Output (16 bytes) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_port_led_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (48 bytes) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused_0[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + u8 led0_group_id; + u8 unused_1; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + u8 led1_group_id; + u8 unused_2; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define 
PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + u8 led2_group_id; + u8 unused_3; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + u8 led3_group_id; + u8 unused_4; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_5; + u8 unused_6; + u8 unused_7; + u8 valid; +}; + /* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { @@ -2797,6 +3012,40 @@ struct hwrm_vnic_cfg_output { u8 valid; }; +/* hwrm_vnic_qcaps */ +/* Input (24 bytes) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + __le32 unused_0; +}; + +/* Output (24 bytes) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0; + u8 unused_1; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_vnic_tpa_cfg */ /* Input (40 bytes) */ struct hwrm_vnic_tpa_cfg_input { @@ -4058,9 +4307,7 @@ struct hwrm_fw_set_structured_data_input { __le64 src_data_addr; __le16 data_len; u8 hdr_cnt; - u8 unused_0; - __le16 port_id; - __le16 unused_1; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4077,7 +4324,7 @@ struct hwrm_fw_set_structured_data_output { }; /* hwrm_fw_get_structured_data */ -/* Input (40 bytes) */ +/* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { __le16 req_type; __le16 cmpl_ring; @@ -4097,8 +4344,6 @@ struct hwrm_fw_get_structured_data_input { #define 
FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL u8 count; u8 unused_0; - __le16 port_id; - __le16 unused_1[3]; }; /* Output (16 bytes) */ @@ -4582,7 +4827,8 @@ struct hwrm_nvm_install_update_input { __le32 install_type; #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL - __le32 unused_0; + __le16 flags; + __le16 unused_0; }; /* Output (24 bytes) */ @@ -4939,12 +5185,13 @@ struct ctx_hw_stats { struct hwrm_struct_hdr { __le16 struct_id; #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL - #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL - #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL - #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL - #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL - #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL - #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL __le16 len; u8 version; u8 count; @@ -4954,14 +5201,14 @@ struct hwrm_struct_hdr { __le16 unused_0[3]; }; -/* DCBX Application configuration structure (8 bytes) */ -struct hwrm_struct_data_dcbx_app_cfg { - __le16 protocol_id; +/* DCBX Application configuration structure (1057) (8 bytes) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; u8 protocol_selector; - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL - #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL u8 priority; u8 valid; u8 unused_0[3]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c69602508666..0b8cd7443843 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -15,6 +15,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -416,6 +417,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u16 vf_ring_grps; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + int total_vf_tx_rings = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); @@ -429,6 +431,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; + vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | FUNC_CFG_REQ_ENABLES_MRU | @@ -451,7 +455,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) req.num_rx_rings = cpu_to_le16(vf_rx_rings); req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.num_l2_ctxs = cpu_to_le16(4); - vf_vnics 
= 1; req.num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ @@ -459,6 +462,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_vfs; i++) { + int vf_tx_rsvd = vf_tx_rings; + req.fid = cpu_to_le16(pf->first_vf_id + i); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -466,10 +471,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = le16_to_cpu(req.fid); + rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, + &vf_tx_rsvd); + if (rc) + break; + total_vf_tx_rings += vf_tx_rsvd; } mutex_unlock(&bp->hwrm_cmd_lock); if (!rc) { - pf->max_tx_rings -= vf_tx_rings * num_vfs; + pf->max_tx_rings -= total_vf_tx_rings; pf->max_rx_rings -= vf_rx_rings * num_vfs; pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; pf->max_cp_rings -= vf_cp_rings * num_vfs; @@ -506,6 +516,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } + if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) + rx_ok = 0; if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) tx_ok = 1; @@ -544,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) if (rc) goto err_out2; + bnxt_ulp_sriov_cfg(bp, *num_vfs); + rc = pci_enable_sriov(bp->pdev, *num_vfs); if (rc) goto err_out2; @@ -585,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp) rtnl_lock(); bnxt_restore_pf_fw_resources(bp); rtnl_unlock(); + + bnxt_ulp_sriov_cfg(bp, 0); } int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 435a2e4739d1..89d4feba1a9a 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget) sbdma_tx_process(sc, &(sc->sbm_txdma), 1); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ae42de4fdddf..a448177990fe 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14145,8 +14145,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_link_ksettings = tg3_set_link_ksettings, }; -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tg3 *tp = netdev_priv(dev); @@ -14154,13 +14154,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, if (!tp->hw_stats) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); - return stats; + return; } tg3_get_nstats(tp, stats); spin_unlock_bh(&tp->lock); - - return stats; } static void tg3_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 112030828c4b..6e13c937d715 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) return rcvd; poll_exit: - napi_complete(napi); + napi_complete_done(napi, rcvd); rx_ctrl->rx_complete++; @@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) * 
Used spin_lock to synchronize reading of stats structures, which * is written by BNA under the same lock. */ -static struct rtnl_link_stats64 * +static void bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct bnad *bnad = netdev_priv(netdev); @@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) bnad_netdev_hwstats_fill(bnad, stats); spin_unlock_irqrestore(&bnad->bna_lock, flags); - - return stats; } static void @@ -3427,7 +3425,7 @@ static const struct net_device_ops bnad_netdev_ops = { .ndo_open = bnad_open, .ndo_stop = bnad_stop, .ndo_start_xmit = bnad_start_xmit, - .ndo_get_stats64 = bnad_get_stats64, + .ndo_get_stats64 = bnad_get_stats64, .ndo_set_rx_mode = bnad_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnad_set_mac_address, diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index baba2db9d9c2..016d481c6476 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1146,7 +1146,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* Packets received while interrupts were disabled */ status = macb_readl(bp, RSR); @@ -2146,6 +2146,9 @@ static int macb_open(struct net_device *dev) netif_tx_start_all_queues(dev); + if (bp->ptp_info) + bp->ptp_info->ptp_init(dev); + return 0; } @@ -2167,6 +2170,9 @@ static int macb_close(struct net_device *dev) macb_free_consistent(bp); + if (bp->ptp_info) + bp->ptp_info->ptp_remove(dev); + return 0; } @@ -2440,6 +2446,17 @@ static int macb_set_ringparam(struct net_device *netdev, return 0; } +static int macb_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct macb *bp = netdev_priv(netdev); + + if (bp->ptp_info) + return bp->ptp_info->get_ts_info(netdev, info); + + return ethtool_op_get_ts_info(netdev, info); +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -2457,7 +2474,7 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = macb_get_ts_info, .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, @@ -2470,6 +2487,7 @@ static const struct ethtool_ops gem_ethtool_ops = { static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct phy_device *phydev = dev->phydev; + struct macb *bp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; @@ -2477,7 +2495,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, rq, cmd); + if (!bp->ptp_info) + return phy_mii_ioctl(phydev, rq, cmd); + + switch (cmd) { + case SIOCSHWTSTAMP: + return bp->ptp_info->set_hwtst(dev, rq, cmd); + case SIOCGHWTSTAMP: + return bp->ptp_info->get_hwtst(dev, rq); + default: + return phy_mii_ioctl(phydev, rq, cmd); + } } static int macb_set_features(struct net_device *netdev, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index fc8550a5d47f..a2cf91223003 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -131,6 +131,20 @@ #define GEM_RXIPCCNT 0x01a8 /* IP 
header Checksum Error Counter */ #define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ #define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */ +#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */ +#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */ +#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */ +#define GEM_TA 0x01d8 /* 1588 Timer Adjust */ +#define GEM_TI 0x01dc /* 1588 Timer Increment */ +#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */ +#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */ +#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */ +#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */ +#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */ +#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ +#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ +#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ #define GEM_DCFG1 0x0280 /* Design Config 1 */ #define GEM_DCFG2 0x0284 /* Design Config 2 */ #define GEM_DCFG3 0x0288 /* Design Config 3 */ @@ -174,6 +188,7 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 +#define MACB_SRTSM_OFFSET 15 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -319,6 +334,32 @@ #define MACB_PTZ_SIZE 1 #define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ #define MACB_WOL_SIZE 1 +#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */ +#define MACB_DRQFR_SIZE 1 +#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */ +#define MACB_SFR_SIZE 1 +#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */ +#define MACB_DRQFT_SIZE 1 +#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */ +#define MACB_SFT_SIZE 1 +#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */ +#define MACB_PDRQFR_SIZE 1 +#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */ +#define MACB_PDRSFR_SIZE 1 +#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */ +#define MACB_PDRQFT_SIZE 1 +#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */ +#define MACB_PDRSFT_SIZE 1 +#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ +#define MACB_SRI_SIZE 1 + +/* Timer increment fields */ +#define MACB_TI_CNS_OFFSET 0 +#define MACB_TI_CNS_SIZE 8 +#define MACB_TI_ACNS_OFFSET 8 +#define MACB_TI_ACNS_SIZE 8 +#define MACB_TI_NIT_OFFSET 16 +#define MACB_TI_NIT_SIZE 8 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -388,6 +429,17 @@ #define GEM_DAW64_OFFSET 23 #define GEM_DAW64_SIZE 1 +/* Bitfields in TISUBN */ +#define GEM_SUBNSINCR_OFFSET 0 +#define GEM_SUBNSINCR_SIZE 16 + +/* Bitfields in TI */ +#define GEM_NSINCR_OFFSET 0 +#define GEM_NSINCR_SIZE 8 + +/* Bitfields in ADJ */ +#define GEM_ADDSUB_OFFSET 31 +#define GEM_ADDSUB_SIZE 1 /* Constants for CLK */ #define MACB_CLK_DIV8 0 #define MACB_CLK_DIV16 1 @@ -415,6 +467,7 @@ #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_JUMBO 0x00000020 +#define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -792,6 +845,20 @@ struct macb_or_gem_ops { int (*mog_rx)(struct macb *bp, int budget); }; +/* MACB-PTP interface: adapt to platform needs. 
*/ +struct macb_ptp_info { + void (*ptp_init)(struct net_device *ndev); + void (*ptp_remove)(struct net_device *ndev); + s32 (*get_ptp_max_adj)(void); + unsigned int (*get_tsu_rate)(struct macb *bp); + int (*get_ts_info)(struct net_device *dev, + struct ethtool_ts_info *info); + int (*get_hwtst)(struct net_device *netdev, + struct ifreq *ifr); + int (*set_hwtst)(struct net_device *netdev, + struct ifreq *ifr, int cmd); +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -885,6 +952,7 @@ struct macb { u32 wol; + struct macb_ptp_info *ptp_info; /* macb-ptp interface */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT enum macb_hw_dma_cap hw_dma_cap; #endif @@ -895,4 +963,9 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } +static inline bool gem_has_ptp(struct macb *bp) +{ + return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP); +} + #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index ce7de6f72512..2bd7c638b178 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) work_done = xgmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; @@ -1446,9 +1446,9 @@ static void xgmac_poll_controller(struct net_device *dev) } #endif -static struct rtnl_link_stats64 * +static void xgmac_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) + struct rtnl_link_stats64 *storage) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *base = priv->base; @@ -1476,7 +1476,6 @@ xgmac_get_stats64(struct net_device *dev, writel(0, base + XGMAC_MMC_CTRL); spin_unlock_bh(&priv->stats_lock); - return storage; } static int xgmac_set_mac_address(struct net_device *dev, void *p) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index b00c3002360e..50384cede8be 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - max_rx = CFG_GET_OQ_MAX_Q(conf23); - max_tx = CFG_GET_IQ_MAX_Q(conf23); - rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx); - tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx); + max_rx = oct->sriov_info.num_pf_rings; + max_tx = oct->sriov_info.num_pf_rings; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; + } else if (OCTEON_CN23XX_VF(oct)) { + max_tx = oct->sriov_info.rings_per_vf; + max_rx = oct->sriov_info.rings_per_vf; + rx_count = lio->linfo.num_rxpciq; + tx_count = lio->linfo.num_txpciq; } channel->max_rx = max_rx; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 39a9665c9d00..c12cfa4113cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2223,25 +2223,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff 
structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - u32 qindex = 0; - struct lio *lio; - - lio = GET_LIO(dev); - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. @@ -2263,6 +2244,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct skb_shared_hwtstamps *shhwtstamps; u64 ns; u16 vtag = 0; + u32 r_dh_off; struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); @@ -2308,6 +2290,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + if (((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX)) && ptp_enable) { @@ -2320,16 +2304,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), /* Nanoseconds are in the first 64-bits * of the packet. */ - memcpy(&ns, (skb->data), sizeof(ns)); + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); } - skb_pull(skb, sizeof(ns)); } } + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && (((rh->r_dh.encap_on) && @@ -2365,7 +2360,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -2441,7 +2435,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ @@ -2451,8 +2445,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2629,7 +2627,9 @@ static int liquidio_open(struct net_device *netdev) oct->droq[0]->ops.poll_mode = 1; } - oct_ptp_open(netdev); + if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) && + ptp_enable) + oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); @@ -2677,13 +2677,7 @@ static int liquidio_stop(struct net_device *netdev) lio->linfo.link.s.link_up = 0; lio->link_changes++; - /* Pause for a moment and wait for Octeon to flush out (to the wire) any - * egress packets that are in-flight. - */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(100)); - - /* Now it should be safe to tell Octeon that nic interface is down. */ + /* Tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); if (OCTEON_CN23XX_PF(oct)) { @@ -2973,9 +2967,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) */ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + struct lio *lio = GET_LIO(netdev); + switch (cmd) { case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr); + if ((lio->oct_dev->chip_id == OCTEON_CN66XX || + lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable) + return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -3322,11 +3320,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -3741,7 +3739,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_link_state = liquidio_set_vf_link_state, - .ndo_select_queue = select_q }; /** \brief Entry point for the liquidio module diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 70d96c10c673..631f1c0f9e4d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1455,26 +1455,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Select queue based on hash - * @param dev Net device - * @param skb sk_buff structure - * @returns selected queue number - */ -static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv __attribute__((unused)), - select_queue_fallback_t fallback __attribute__((unused))) -{ - struct lio *lio; - u32 qindex; - - lio = GET_LIO(dev); - - qindex = skb_tx_hash(dev, skb); - - return (u16)(qindex % (lio->linfo.num_txpciq)); -} - /** Routine to push packets arriving on Octeon interface upto network layer. * @param oct_id - octeon device id. * @param skbuff - skbuff struct to be passed to network layer. 
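
Both liquidio_push_packet() hunks in this commit replace the old fixed-size skb_pull() with a walk over optional fields in the Octeon receive data header: rh->r_dh.len counts 8-byte units (BYTES_PER_DHLEN_UNIT), r_dh_off starts at the last unit and steps back once per optional field, and only afterwards is the whole header stripped. The fragment below condenses that pattern from the VF hunk that follows; it is a sketch assuming the driver's octeon_rh bitfields and surrounding locals, not a verbatim excerpt:

	u32 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

	if (rh->r_dh.has_hwtstamp)	/* one unit holds the PTP timestamp */
		r_dh_off -= BYTES_PER_DHLEN_UNIT;

	if (rh->r_dh.has_hash) {	/* 4-byte RSS hash in its own unit */
		__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);

		skb_set_hash(skb, be32_to_cpu(*hash_be), PKT_HASH_TYPE_L4);
		r_dh_off -= BYTES_PER_DHLEN_UNIT;
	}

	/* Strip the entire data header in one pull once all fields are read. */
	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
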
@@ -1497,6 +1477,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), struct net_device *netdev = (struct net_device *)arg; struct sk_buff *skb = (struct sk_buff *)skbuff; u16 vtag = 0; + u32 r_dh_off; if (netdev) { struct lio *lio = GET_LIO(netdev); @@ -1540,7 +1521,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), put_page(pg_info->page); } - skb_pull(skb, rh->r_dh.len * 8); + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hwtstamp) + r_dh_off -= BYTES_PER_DHLEN_UNIT; + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); skb->protocol = eth_type_trans(skb, skb->dev); if ((netdev->features & NETIF_F_RXCSUM) && @@ -1577,7 +1571,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (packet_was_received) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; - netdev->last_rx = jiffies; } else { droq->stats.rx_dropped++; netif_info(lio, rx_err, lio->netdev, @@ -1627,7 +1620,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) iq = oct->instr_queue[iq_no]; if (iq) { /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, 1, budget); + tx_done = octeon_flush_iq(oct, iq, budget); /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. */ @@ -1637,8 +1630,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) __func__, iq_no); } - if ((work_done < budget) && (tx_done)) { - napi_complete(napi); + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; @@ -2440,11 +2437,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - if (skb_shinfo(skb)->gso_size) - stats->tx_done += skb_shinfo(skb)->gso_segs; + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; - stats->tx_tot_bytes += skb->len; + stats->tx_tot_bytes += ndata.datasize; return NETDEV_TX_OK; @@ -2703,7 +2700,6 @@ static const struct net_device_ops lionetdevops = { .ndo_set_features = liquidio_set_features, .ndo_udp_tunnel_add = liquidio_add_vxlan_port, .ndo_udp_tunnel_del = liquidio_del_vxlan_port, - .ndo_select_queue = select_q, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index ba329f6ca779..294c6f3c6b48 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -98,6 +98,9 @@ enum octeon_tag_type { #define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2) #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) +#define BYTES_PER_DHLEN_UNIT 8 +#define MAX_REG_CNT 2000000U + static inline u32 incr_index(u32 index, u32 count, u32 max) { if ((index + count) >= max) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 1cb3514fc949..b3dc2e9651a8 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -429,15 +429,11 @@ struct octeon_config { /* The following config values are fixed and should not be modified. */ -/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */ -#define MAX_BAR1_MAP_INDEX 2 +#define BAR1_INDEX_DYNAMIC_MAP 2 +#define BAR1_INDEX_STATIC_MAP 15 #define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024) -/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access. - * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access. - */ -#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \ - OCTEON_BAR1_ENTRY_SIZE) +#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking * NoResponse Lists are now maintained with each IQ. (Dec' 2007). diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 3265e0b7923e..42b673dce533 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -549,6 +549,16 @@ int octeon_init_consoles(struct octeon_device *oct) return ret; } + /* Dedicate one of Octeon's BAR1 index registers to create a static + * mapping to a region of Octeon DRAM that contains the PCI console + * named block. + */ + oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP; + oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index, + true); + oct->console_nb_info.dram_region_base = addr + & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL); + /* num_consoles > 0, is an indication that the consoles * are accessible */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index a8df493a5012..9675ffbf25e6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&droq->lock); writel(droq->pkt_count, droq->pkts_sent_reg); droq->pkt_count = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } @@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) spin_lock_bh(&iq->lock); writel(iq->pkt_in_done, iq->inst_cnt_reg); iq->pkt_in_done = 0; + /* this write needs to be flushed before we release the lock */ + mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 18f6836250a6..c301a3852482 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -477,6 +477,12 @@ struct octeon_device { /* Console caches */ struct octeon_console console[MAX_OCTEON_MAPS]; + /* Console named block info */ + struct { + u64 dram_region_base; + int bar1_index; + } console_nb_info; + /* Coprocessor clock rate. 
*/ u64 coproc_clock_rate; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index e04ca8f0b4a7..4608a5af35a3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx, void *app_ctx); int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget); + u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 73696b427f06..201b9875f9bb 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct, { struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; + long timeout = LIO_MBOX_WRITE_WAIT_TIME; unsigned long flags; spin_lock_irqsave(&mbox->lock, flags); @@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { - schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; @@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct, count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) { - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index fe60a3e6247b..c9376fe075bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -31,8 +31,8 @@ #define OCTEON_PFVFSIG 0x1122334455667788 #define OCTEON_PFVFERR 0xDEADDEADDEADDEAD -#define LIO_MBOX_WRITE_WAIT_CNT 1000 -#define LIO_MBOX_WRITE_WAIT_TIME 10 +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1) enum octeon_mbox_cmd_status { OCTEON_MBOX_STATUS_SUCCESS = 0, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 13a18c9a7a51..5cd96e7d426c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -23,7 +23,7 @@ #include "response_manager.h" #include "octeon_device.h" -#define MEMOPS_IDX MAX_BAR1_MAP_INDEX +#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP #ifdef __BIG_ENDIAN_BITFIELD static inline void @@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr, u32 copy_len = 0, index_reg_val = 0; unsigned long flags; u8 __iomem *mapped_addr; + u64 static_mapping_base; + + static_mapping_base = oct->console_nb_info.dram_region_base; + + if (static_mapping_base && + static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) { + int bar1_index = oct->console_nb_info.bar1_index; + + mapped_addr = oct->mmio[1].hw_addr + + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE)) + + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL)); + + if (op) + octeon_pci_fastread(oct, mapped_addr, hostbuf, len); + else + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len); + + return; + } 
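The console fast path added to __octeon_pci_rw_core_mem() relies on simple BAR1 window arithmetic: each BAR1 index register maps one 4 MB window (OCTEON_BAR1_ENTRY_SIZE) of Octeon DRAM, so a core address splits into a 4 MB-aligned region base (compared against the cached console_nb_info.dram_region_base) and an offset inside the window. Spelled out as a sketch, where bar1_va stands for oct->mmio[1].hw_addr and index * ENTRY_SIZE is what the driver writes as index << ilog2(ENTRY_SIZE):

	#include <stdint.h>

	#define OCTEON_BAR1_ENTRY_SIZE (4ULL * 1024 * 1024)

	/* 4 MB-aligned DRAM region covered by one BAR1 window */
	static inline uint64_t bar1_region_base(uint64_t core_addr)
	{
		return core_addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL);
	}

	/* host-side address of core_addr through BAR1 window 'index' */
	static inline uint8_t *bar1_mapped_addr(uint8_t *bar1_va, int index,
						uint64_t core_addr)
	{
		return bar1_va + (uint64_t)index * OCTEON_BAR1_ENTRY_SIZE
			       + (core_addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
	}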
spin_lock_irqsave(&oct->mem_access_lock, flags); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index c3d6a8228362..0243be8dd56f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, /* Add in the response related fields. Opcode and Param are already * there. */ - if (OCTEON_CN23XX_PF(oct)) { + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; @@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, *sc->status_word = COMPLETION_WORD_INIT; - if (OCTEON_CN23XX_PF(oct)) + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) sc->cmd.cmd3.rptr = sc->dmarptr; else sc->cmd.cmd2.rptr = sc->dmarptr; diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 3ce66759e80a..707bc15adec6 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct, /* Can only be called from process context */ int octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh, u32 napi_budget) + u32 napi_budget) { u32 inst_processed = 0; u32 tot_inst_processed = 0; @@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); - if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - do { - /* Process any outstanding IQ packets. */ - if (iq->flush_index == iq->octeon_read_index) - break; - - if (napi_budget) - inst_processed = lio_process_iq_request_list - (oct, iq, - napi_budget - tot_inst_processed); - else - inst_processed = - lio_process_iq_request_list(oct, iq, 0); + do { + /* Process any outstanding IQ packets. 
*/ + if (iq->flush_index == iq->octeon_read_index) + break; - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } + if (napi_budget) + inst_processed = + lio_process_iq_request_list(oct, iq, + napi_budget - + tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } - tot_inst_processed += inst_processed; - inst_processed = 0; + tot_inst_processed += inst_processed; + inst_processed = 0; - } while (tot_inst_processed < napi_budget); + } while (tot_inst_processed < napi_budget); - if (napi_budget && (tot_inst_processed >= napi_budget)) - tx_done = 0; - } + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; iq->last_db_time = jiffies; @@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) iq->last_db_time = jiffies; /* Flush the instruction queue */ - octeon_flush_iq(oct, iq, 1, 0); + octeon_flush_iq(oct, iq, 0); lio_enable_irq(NULL, iq); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 21f80f5744ba..a2138686c605 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* We stopped because no more packets were available. */ - napi_complete(napi); + napi_complete_done(napi, work_done); octeon_mgmt_enable_rx_irq(p); } octeon_mgmt_update_rx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2e74bbaa38e1..02a986cdbb39 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; - ring->rx_max_pending = MAX_RCV_BUF_COUNT; - ring->rx_pending = qs->rbdr_len; + ring->rx_max_pending = MAX_CMP_QUEUE_LEN; + ring->rx_pending = qs->cq_len; ring->tx_max_pending = MAX_SND_QUEUE_LEN; ring->tx_pending = qs->sq_len; } +static int nicvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + u32 rx_count, tx_count; + + /* Due to HW errata this is not supported on T88 pass 1.x silicon */ + if (pass1_silicon(nic->pdev)) + return -EINVAL; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + tx_count = clamp_t(u32, ring->tx_pending, + MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN); + rx_count = clamp_t(u32, ring->rx_pending, + MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN); + + if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len)) + return 0; + + /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */ + qs->sq_len = rounddown_pow_of_two(tx_count); + qs->cq_len = rounddown_pow_of_two(rx_count); + + if (netif_running(netdev)) { + nicvf_stop(netdev); + nicvf_open(netdev); + } + + return 0; +} + static int nicvf_get_rss_hash_opts(struct nicvf *nic, struct ethtool_rxnfc *info) { @@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, } static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *hkey, u8 hfunc) + const u8 
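With the pending_thresh parameter gone, octeon_flush_iq() drains unconditionally: a napi_budget of 0 (the __check_db_timeout() caller) means one unbounded pass, and otherwise processing stops once the budget is consumed, with the return value reporting whether the queue was fully flushed. The control flow reduced to a runnable sketch with simplified types, where process_requests() stands in for lio_process_iq_request_list() and a max of 0 means "no limit", mirroring the driver:

	#include <stdint.h>

	struct iq_sketch {
		uint32_t flush_index;
		uint32_t read_index;
	};

	static uint32_t process_requests(struct iq_sketch *iq, uint32_t max)
	{
		uint32_t avail = iq->read_index - iq->flush_index;
		uint32_t n = (max && max < avail) ? max : avail;

		iq->flush_index += n;	/* pretend each entry completed */
		return n;
	}

	static int flush_iq_sketch(struct iq_sketch *iq, uint32_t napi_budget)
	{
		uint32_t done = 0;

		do {
			if (iq->flush_index == iq->read_index)
				break;			/* nothing pending */
			done += process_requests(iq, napi_budget ?
						     napi_budget - done : 0);
		} while (done < napi_budget);

		return !(napi_budget && done >= napi_budget);	/* tx_done */
	}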
*hkey, const u8 hfunc) { struct nicvf *nic = netdev_priv(dev); struct nicvf_rss_info *rss = &nic->rss_info; @@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_regs = nicvf_get_regs, .get_coalesce = nicvf_get_coalesce, .get_ringparam = nicvf_get_ringparam, + .set_ringparam = nicvf_set_ringparam, .get_rxnfc = nicvf_get_rxnfc, .set_rxnfc = nicvf_set_rxnfc, .get_rxfh_key_size = nicvf_get_rxfh_key_size, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 2006f58b14b1..6feaa24bcfd4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget) if (work_done < budget) { /* Slow packet rate, exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); /* Re-enable interrupts */ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->cq_idx); @@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev) /* Configure receive side scaling and MTU */ if (!nic->sqs_mode) { nicvf_rss_init(nic); - if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + err = nicvf_update_hw_max_frs(nic, netdev->mtu); + if (err) goto cleanup; /* Clear percpu stats */ @@ -1461,8 +1462,8 @@ void nicvf_update_stats(struct nicvf *nic) nicvf_update_sq_stats(nic, qidx); } -static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void nicvf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; @@ -1478,7 +1479,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, stats->tx_packets = hw_stats->tx_frames; stats->tx_dropped = hw_stats->tx_drops; - return stats; } static void nicvf_tx_timeout(struct net_device *dev) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d2ac133e36f1..ac0390be3b12 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; - cq_cfg.qsize = CMP_QSIZE; + cq_cfg.qsize = ilog2(qs->cq_len >> 10); cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); @@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; - sq_cfg.qsize = SND_QSIZE; + sq_cfg.qsize = ilog2(qs->sq_len >> 10); sq_cfg.tstmp_bgx_intf = 0; - sq_cfg.cq_limit = 0; + /* CQ's level at which HW will stop processing SQEs to avoid + * transmitting a pkt with no space in CQ to post CQE_TX. + */ + sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); /* Set threshold value for interrupt generation */ @@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) { bool disable = false; struct queue_set *qs = nic->qs; + struct queue_set *pqs = nic->pnicvf->qs; int qidx; if (!qs) return 0; + /* Take primary VF's queue lengths. + * This is needed to take queue lengths set from ethtool + * into consideration. 
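The new nicvf_set_ringparam() accepts arbitrary counts, but the hardware only takes power-of-two lengths between 1K and 64K, encoded in the SQ/CQ CFG registers as log2(len) - 10. The clamp/round/encode arithmetic, with a worked value (assuming 1024/65536 as the min/max, per the MIN_*/MAX_* macros added to nicvf_queues.h below):

	#include <stdint.h>

	static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	static uint32_t rounddown_pow2(uint32_t v)
	{
		while (v & (v - 1))
			v &= v - 1;	/* clear low set bits until power of two */
		return v;
	}

	static uint32_t ilog2_u32(uint32_t v)
	{
		uint32_t r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	/* a request of 5000 descriptors:
	 *   clamp_u32(5000, 1024, 65536)  -> 5000
	 *   rounddown_pow2(5000)          -> 4096
	 *   qsize = ilog2_u32(4096 >> 10) -> 2, written to the CFG field;
	 * and with cq_len = 4096 the new cq_limit above works out to
	 *   (CMP_QUEUE_PIPELINE_RSVD * 256) / 4096 = (544 * 256) / 4096 = 34 */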
+ */ + if (nic->sqs_mode && pqs) { + qs->cq_len = pqs->cq_len; + qs->sq_len = pqs->sq_len; + } + if (enable) { if (nicvf_alloc_resources(nic)) return -ENOMEM; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 9e2104675bc9..5cb84da99a2d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -59,8 +59,9 @@ /* Default queue count per QS, its lengths and threshold values */ #define DEFAULT_RBDR_CNT 1 -#define SND_QSIZE SND_QUEUE_SIZE2 +#define SND_QSIZE SND_QUEUE_SIZE0 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL #define MIN_SQ_DESC_PER_PKT_XMIT 2 @@ -70,11 +71,18 @@ /* Keep CQ and SQ sizes same, if timestamping * is enabled this equation will change. */ -#define CMP_QSIZE CMP_QUEUE_SIZE2 +#define CMP_QSIZE CMP_QUEUE_SIZE0 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10)) +#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10)) #define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2) #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ +/* No of CQEs that might anyway get used by HW due to pipelining * effects irrespective of PASS/DROP/LEVELS being configured */ +#define CMP_QUEUE_PIPELINE_RSVD 544 + #define RBDR_SIZE RBDR_SIZE0 #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) #define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) @@ -93,8 +101,8 @@ * RED accepts pkt if unused CQE < 2304 & >= 2560 * DROPs pkts if unused CQE < 2304 */ -#define RQ_PASS_CQ_LVL 160ULL -#define RQ_DROP_CQ_LVL 144ULL +#define RQ_PASS_CQ_LVL 192ULL +#define RQ_DROP_CQ_LVL 184ULL /* RED and Backpressure levels of RBDR for pkt reception * For RBDR, level is a measure of fullness i.e 0x0 means empty diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 2f85b64f01fa..dfb2bad7ced5 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -894,17 +894,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) struct device *dev = &bgx->pdev->dev; struct lmac *lmac; char str[20]; - u8 dlm; - if (lmacid > bgx->max_lmac) + if (!bgx->is_dlm && lmacid) return; lmac = &bgx->lmac[lmacid]; - dlm = (lmacid / 2) + (bgx->bgx_id * 2); if (!bgx->is_dlm) sprintf(str, "BGX%d QLM mode", bgx->bgx_id); else - sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); + sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid); switch (lmac->lmac_type) { case BGX_MODE_SGMII: @@ -990,7 +988,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) { struct lmac *lmac; - struct lmac *olmac; u64 cmr_cfg; u8 lmac_type; u8 lane_to_sds; @@ -1010,62 +1007,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) return; } - /* On 81xx BGX can be split across 2 DLMs * firmware programs lmac_type of LMAC0 and LMAC2 + /* For DLMs or SLMs on 80/81/83xx, many lane configurations + * are possible and they vary across boards. The kernel has no + * way to identify the board type/info, but firmware does, so + * just take the lmac type and serdes lane config as is.
*/ - if ((idx == 0) || (idx == 2)) { - cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - /* Check if config is not reset value */ - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) - lmac->lmac_type = BGX_MODE_INVALID; - else - lmac->lmac_type = lmac_type; - lmac_set_training(bgx, lmac, lmac->lmacid); - lmac_set_lane2sds(bgx, lmac); - - olmac = &bgx->lmac[idx + 1]; - /* Check if other LMAC on the same DLM is already configured by - * firmware, if so use the same config or else set as same, as - * that of LMAC 0/2. - * This check is needed as on 80xx only one lane of each of the - * DLM of BGX0 is used, so have to rely on firmware for - * distingushing 80xx from 81xx. - */ - cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); - lmac_type = (u8)((cmr_cfg >> 8) & 0x07); - lane_to_sds = (u8)(cmr_cfg & 0xFF); - if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { - olmac->lmac_type = lmac->lmac_type; - lmac_set_lane2sds(bgx, olmac); - } else { - olmac->lmac_type = lmac_type; - olmac->lane_to_sds = lane_to_sds; - } - lmac_set_training(bgx, olmac, olmac->lmacid); - } -} - -static bool is_dlm0_in_bgx_mode(struct bgx *bgx) -{ - struct lmac *lmac; - - if (!bgx->is_dlm) - return true; - - lmac = &bgx->lmac[0]; - if (lmac->lmac_type == BGX_MODE_INVALID) - return false; - - return true; + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac->lane_to_sds = lane_to_sds; + lmac_set_training(bgx, lmac, lmac->lmacid); } static void bgx_get_qlm_mode(struct bgx *bgx) { struct lmac *lmac; - struct lmac *lmac01; - struct lmac *lmac23; u8 idx; /* Init all LMAC's type to invalid */ @@ -1081,29 +1042,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx) if (bgx->lmac_count > bgx->max_lmac) bgx->lmac_count = bgx->max_lmac; - for (idx = 0; idx < bgx->max_lmac; idx++) - bgx_set_lmac_config(bgx, idx); - - if (!bgx->is_dlm || bgx->is_rgx) { - bgx_print_qlm_mode(bgx, 0); - return; - } - - if (bgx->lmac_count) { - bgx_print_qlm_mode(bgx, 0); - bgx_print_qlm_mode(bgx, 2); - } - - /* If DLM0 is not in BGX mode then LMAC0/1 have - * to be configured with serdes lanes of DLM1 - */ - if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) - return; for (idx = 0; idx < bgx->lmac_count; idx++) { - lmac01 = &bgx->lmac[idx]; - lmac23 = &bgx->lmac[idx + 2]; - lmac01->lmac_type = lmac23->lmac_type; - lmac01->lane_to_sds = lmac23->lane_to_sds; + bgx_set_lmac_config(bgx, idx); + bgx_print_qlm_mode(bgx, idx); } } diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 86f467a2c485..d56142b98534 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget) int work_done = process_responses(adapter, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e4b5b057f417..1b9d154f1149 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget) 
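The simplified bgx_set_lmac_config() now just decodes what firmware programmed into BGX_CMRX_CFG for every LMAC: bits 10:8 carry the LMAC type and bits 7:0 the lane-to-serdes map, and the power-on reset pattern (type 0 with the identity map 0xE4, i.e. lanes 3-2-1-0) marks an LMAC that firmware left unconfigured. A sketch of that decode:

	#include <stdint.h>

	struct lmac_cfg {
		uint8_t type;		/* BGX_MODE_* programmed by firmware */
		uint8_t lane_to_sds;	/* 2 bits per lane; 0xE4 = identity */
		int	configured;
	};

	static struct lmac_cfg decode_cmr_cfg(uint64_t cmr_cfg)
	{
		struct lmac_cfg c = {
			.type		= (uint8_t)((cmr_cfg >> 8) & 0x07),
			.lane_to_sds	= (uint8_t)(cmr_cfg & 0xFF),
		};

		/* reset value means firmware never set this LMAC up */
		c.configured = !(c.type == 0 && c.lane_to_sds == 0xE4);
		return c;
	}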
__skb_queue_head_init(&queue); skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) { - napi_complete(napi); + napi_complete_done(napi, work_done); spin_unlock_irq(&q->lock); return work_done; } @@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done = process_responses(adap, qs, budget); if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* * Because we don't atomically flush the following diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0bce1bf9ca0f..163543b1ea0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -263,6 +263,11 @@ struct tp_params { u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + /* cached TP_OUT_CONFIG compressed error vector + * and passing outer header info for encapsulated packets. + */ + int rx_pkt_encap; + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a * subset of the set of fields which may be present in the Compressed * Filter Tuple portion of filters and TCP TCB connections. The @@ -581,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */ rspq_handler_t handler; rspq_flush_handler_t flush_handler; struct t4_lro_mgr lro_mgr; -#ifdef CONFIG_NET_RX_BUSY_POLL -#define CXGB_POLL_STATE_IDLE 0 -#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ -#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */ -#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */ -#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */ -#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \ - CXGB_POLL_STATE_POLL_YIELD) -#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \ - CXGB_POLL_STATE_POLL) -#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \ - CXGB_POLL_STATE_POLL_YIELD) - unsigned int bpoll_state; - spinlock_t bpoll_lock; /* lock for busy poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ - }; struct sge_eth_stats { /* Ethernet queue statistics */ @@ -782,6 +771,10 @@ struct vf_info { bool pf_set_mac; }; +struct mbox_list { + struct list_head list; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -844,6 +837,10 @@ struct adapter { struct work_struct db_drop_task; bool tid_release_task_busy; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; @@ -1160,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev) return netdev2pinfo(dev)->adapter; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ - spin_lock_init(&q->bpoll_lock); - q->bpoll_state = CXGB_POLL_STATE_IDLE; -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - bool rc = true; - - spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD; - rc = false; - } else { - q->bpoll_state = CXGB_POLL_STATE_NAPI; - } - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_lock_poll(struct 
sge_rspq *q) -{ - bool rc = true; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_LOCKED) { - q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD; - rc = false; - } else { - q->bpoll_state |= CXGB_POLL_STATE_POLL; - } - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - bool rc = false; - - spin_lock_bh(&q->bpoll_lock); - if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) - rc = true; - q->bpoll_state = CXGB_POLL_STATE_IDLE; - spin_unlock_bh(&q->bpoll_lock); - return rc; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return q->bpoll_state & CXGB_POLL_USER_PEND; -} -#else -static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) -{ -} - -static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) -{ - return true; -} - -static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) -{ - return false; -} - -static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* Return a version number to identify the type of adapter. The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision @@ -1312,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); -int cxgb_busy_poll(struct napi_struct *napi); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; @@ -1488,6 +1388,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_shutdown_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6f951877430b..f4f569060689 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -188,18 +188,24 @@ static void link_report(struct net_device *dev) const struct port_info *p = netdev_priv(dev); switch (p->link_cfg.speed) { - case 10000: - s = "10Gbps"; + case 100: + s = "100Mbps"; break; case 1000: - s = "1000Mbps"; + s = "1Gbps"; break; - case 100: - s = "100Mbps"; + case 10000: + s = "10Gbps"; + break; + case 25000: + s = "25Gbps"; break; case 40000: s = "40Gbps"; break; + case 100000: + s = "100Gbps"; + break; default: pr_info("%s: unsupported speed: %d\n", dev->name, p->link_cfg.speed); @@ -738,14 +744,8 @@ static void quiesce_rx(struct adapter *adap) for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } - } } @@ -776,10 +776,9 @@ static void enable_rx(struct adapter *adap) if (!q) continue; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), 
SEINTARM_V(q->intr_params) | @@ -2369,8 +2368,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, } EXPORT_SYMBOL(cxgb4_remove_server_filter); -static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *ns) +static void cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) { struct port_stats stats; struct port_info *p = netdev_priv(dev); @@ -2383,7 +2382,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, spin_lock(&adapter->stats_lock); if (!netif_device_present(dev)) { spin_unlock(&adapter->stats_lock); - return ns; + return; } t4_get_port_stats_offset(adapter, p->tx_chan, &stats, &p->stats_base); @@ -2417,7 +2416,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, ns->tx_errors = stats.tx_error_frames; ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; - return ns; } static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) @@ -2578,6 +2576,19 @@ static int cxgb_get_vf_config(struct net_device *dev, ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } + +static int cxgb_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct port_info *pi = netdev_priv(dev); + unsigned int phy_port_id; + + phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; + ppid->id_len = sizeof(phy_port_id); + memcpy(ppid->id, &phy_port_id, ppid->id_len); + return 0; +} + #endif static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2745,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_fcoe_enable = cxgb_fcoe_enable, .ndo_fcoe_disable = cxgb_fcoe_disable, #endif /* CONFIG_CHELSIO_T4_FCOE */ -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = cxgb_busy_poll, -#endif .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, }; @@ -2757,6 +2765,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@ -2777,8 +2786,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { void t4_fatal_err(struct adapter *adap) { - t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); - t4_intr_disable(adap); + int port; + + /* Disable the SGE since ULDs are going to free resources that + * could be exposed to the adapter. RDMA MWs for example... + */ + t4_shutdown_adapter(adap); + for_each_port(adap, port) { + struct net_device *dev = adap->port[port]; + + /* If we get here in very early initialization the network + * devices may not have been set up yet. 
+ */ + if (!dev) + continue; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); } @@ -4397,9 +4422,9 @@ static void print_port_info(const struct net_device *dev) spd = " 8 GT/s"; if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) - bufp += sprintf(bufp, "100/"); + bufp += sprintf(bufp, "100M/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) - bufp += sprintf(bufp, "1000/"); + bufp += sprintf(bufp, "1G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) @@ -4511,12 +4536,14 @@ static int config_mgmt_dev(struct pci_dev *pdev) int err; snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); - netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, + dummy_setup); if (!netdev) return -ENOMEM; pi = netdev_priv(netdev); pi->adapter = adap; + pi->port_id = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; @@ -4606,6 +4633,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; +#ifdef CONFIG_PCI_IOV + u32 v, port_vec; +#endif printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4707,6 +4737,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); spin_lock_init(&adapter->win0_lock); + spin_lock_init(&adapter->mbox_lock); + + INIT_LIST_HEAD(&adapter->mlist.list); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); @@ -4982,6 +5015,19 @@ sriov: err = -ENOMEM; goto free_adapter; } + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); + + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, + &v, &port_vec); + if (err < 0) { + dev_err(adapter->pdev_dev, "Could not fetch port params\n"); + goto free_adapter; + } + + adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 8098902c094a..36105b6837cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q) if (!q) return; - if (q->handler) { - cxgb_busy_poll_init_lock(q); + if (q->handler) napi_enable(&q->napi); - } + /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q) static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) { - if (q && q->handler) { + if (q && q->handler) napi_disable(&q->napi); - local_bh_disable(); - while (!cxgb_poll_lock_napi(q)) - mdelay(1); - local_bh_enable(); - } } static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index cbd68a8fe2e4..c9026352a842 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ 
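The new cxgb_get_phys_port_id() gives each port a stable physical-port identity by packing the adapter index and port index into one small integer. A sketch of the same encoding, with the struct reduced from netdev_phys_item_id (the decimal packing assumes fewer than ten ports per adapter):

	#include <stdint.h>
	#include <string.h>

	struct phys_item_id {
		uint8_t id[32];
		uint8_t id_len;
	};

	static void encode_phys_port_id(unsigned int adap_idx,
					unsigned int port_id,
					struct phys_item_id *ppid)
	{
		unsigned int v = adap_idx * 10 + port_id;  /* e.g. 2,3 -> 23 */

		ppid->id_len = sizeof(v);
		memcpy(ppid->id, &v, ppid->id_len);
	}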
b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, struct ch_sched_params info; struct ch_sched_params tp; - memset(&info, 0, sizeof(info)); - memset(&tp, 0, sizeof(tp)); - memcpy(&tp, p, sizeof(tp)); /* Don't try to match class parameter */ tp.u.params.class = SCHED_CLS_NONE; @@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, if (e->state == SCHED_STATE_UNUSED) continue; - memset(&info, 0, sizeof(info)); memcpy(&info, &e->info, sizeof(info)); /* Don't try to match class parameter */ info.u.params.class = SCHED_CLS_NONE; @@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (!e) goto out; - memset(&np, 0, sizeof(np)); memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 9f606478c29c..f05f0d400324 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -43,9 +43,7 @@ #include <linux/export.h> #include <net/ipv6.h> #include <net/tcp.h> -#ifdef CONFIG_NET_RX_BUSY_POLL #include <net/busy_poll.h> -#endif /* CONFIG_NET_RX_BUSY_POLL */ #ifdef CONFIG_CHELSIO_T4_FCOE #include <scsi/fc/fc_fcoe.h> #endif /* CONFIG_CHELSIO_T4_FCOE */ @@ -1774,15 +1772,20 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, struct sge_uld_txq *txq; unsigned int idx = skb_txq(skb); - txq_info = adap->sge.uld_txq_info[tx_uld_type]; - txq = &txq_info->uldtxq[idx]; - if (unlikely(is_ctrl_pkt(skb))) { /* Single ctrl queue is a requirement for LE workaround path */ if (adap->tids.nsftids) idx = 0; return ctrl_xmit(&adap->sge.ctrlq[idx], skb); } + + txq_info = adap->sge.uld_txq_info[tx_uld_type]; + if (unlikely(!txq_info)) { + WARN_ON(true); + return NET_XMIT_DROP; + } + + txq = &txq_info->uldtxq[idx]; return ofld_xmit(txq, skb); } @@ -2038,16 +2041,22 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? 
CPL_TRACE_PKT : CPL_TRACE_PKT_T5; + u16 err_vec; struct port_info *pi; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); pkt = (const struct cpl_rx_pkt *)rsp; - csum_ok = pkt->csum_calc && !pkt->err_vec && + /* Compressed error vector is enabled for T6 only */ + if (q->adap->params.tp.rx_pkt_encap) + err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); + else + err_vec = be16_to_cpu(pkt->err_vec); + + csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP_F)) && - !(cxgb_poll_busy_polling(q)) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { do_gro(rxq, si, pkt); return 0; @@ -2092,7 +2101,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { - if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) + if (q->adap->params.tp.rx_pkt_encap) + csum_ok = err_vec & + T6_COMPR_RXERR_SUM_F; + else + csum_ok = err_vec & RXERR_CSUM_F; + if (!csum_ok) skb->ip_summed = CHECKSUM_UNNECESSARY; } } @@ -2273,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget) return budget - budget_left; } -#ifdef CONFIG_NET_RX_BUSY_POLL -int cxgb_busy_poll(struct napi_struct *napi) -{ - struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); - unsigned int params, work_done; - u32 val; - - if (!cxgb_poll_lock_poll(q)) - return LL_FLUSH_BUSY; - - work_done = process_responses(q, 4); - params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1); - q->next_intr_params = params; - val = CIDXINC_V(work_done) | SEINTARM_V(params); - - /* If we don't have access to the new User GTS (T5+), use the old - * doorbell mechanism; otherwise use the new BAR2 mechanism. - */ - if (unlikely(!q->bar2_addr)) - t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), - val | INGRESSQID_V((u32)q->cntxt_id)); - else { - writel(val | INGRESSQID_V(q->bar2_qid), - q->bar2_addr + SGE_UDB_GTS); - wmb(); - } - - cxgb_poll_unlock_poll(q); - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * napi_rx_handler - the NAPI handler for Rx processing * @napi: the napi instance @@ -2323,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) int work_done; u32 val; - if (!cxgb_poll_lock_napi(q)) - return budget; - work_done = process_responses(q, budget); if (likely(work_done < budget)) { int timer_index; @@ -2365,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) q->bar2_addr + SGE_UDB_GTS); wmb(); } - cxgb_poll_unlock_napi(q); return work_done; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8139514d32c..87000cd39737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; + struct mbox_list entry; u16 access = 0; u16 execute = 0; u32 v; @@ -311,11 +312,62 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, timeout = -timeout; } + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... 
+ */ + spin_lock(&adap->mbox_lock); + list_add_tail(&entry.list, &adap->mlist.list); + spin_unlock(&adap->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... + */ + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); + ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY; + t4_record_mbox(adap, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adap->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + + /* Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); - if (v != MBOX_OWNER_DRV) { + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); return ret; @@ -366,6 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, execute = i + ms; t4_record_mbox(adap, cmd_rpl, MBOX_LEN, access, execute); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); return -FW_CMD_RETVAL_G((int)res); } } @@ -375,6 +430,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); + spin_lock(&adap->mbox_lock); + list_del(&entry.list); + spin_unlock(&adap->mbox_lock); + t4_fatal_err(adap); return ret; } @@ -5382,22 +5441,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) const char *t4_get_port_type_description(enum fw_port_type port_type) { static const char *const port_type_description[] = { - "R XFI", - "R XAUI", - "T SGMII", - "T XFI", - "T XAUI", + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", "KX4", "CX4", "KX", "KR", - "R SFP+", - "KR/KX", - "KR/KX/KX4", - "R QSFP_10G", - "R QSA", - "R QSFP", - "R BP40_BA", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + "KR4_100G", + "CR4_QSFP", + "CR_QSFP", + "CR2_QSFP", + "SFP28", + "KR_SFP28", }; if (port_type < ARRAY_SIZE(port_type_description)) @@ -5438,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx, void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A); #define GET_STAT(name) \ t4_read_reg64(adap, \ @@ -5469,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATTX_F) { + p->tx_frames -= p->tx_pause; + p->tx_octets -= p->tx_pause * 64; + } + if
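The queueing added to t4_wr_mbox_meat_timeout() is a FIFO ticket scheme built from a spinlock-protected list: each caller links a stack-allocated node onto the tail, sleeps (or busy-waits, when sleep_ok is false) until its node reaches the head, and every exit path, success or failure, must unlink the node so later waiters can advance. Reduced to a sketch with simplified names:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	struct mbox_waiter {
		struct list_head list;	/* lives on the caller's stack */
	};

	static int mbox_wait_turn(spinlock_t *lock, struct list_head *waiters,
				  struct mbox_waiter *me, unsigned int max_ms)
	{
		unsigned int waited = 0;

		spin_lock(lock);
		list_add_tail(&me->list, waiters);
		spin_unlock(lock);

		while (list_first_entry(waiters, struct mbox_waiter, list) != me) {
			if (waited++ >= max_ms) {
				spin_lock(lock);
				list_del(&me->list);	/* give up our ticket */
				spin_unlock(lock);
				return -EBUSY;
			}
			msleep(1);
		}

		return 0;	/* head of the queue; caller unlinks when done */
	}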
(stat_ctl & COUNTPAUSEMCTX_F) + p->tx_mcast_frames -= p->tx_pause; + } p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); @@ -5497,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & COUNTPAUSESTATRX_F) { + p->rx_frames -= p->rx_pause; + p->rx_octets -= p->rx_pause * 64; + } + if (stat_ctl & COUNTPAUSEMCRX_F) + p->rx_mcast_frames -= p->rx_pause; + } + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; @@ -7477,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter) } /** + * t4_shutdown_adapter - shut down adapter, host & wire + * @adapter: the adapter + * + * Perform an emergency shutdown of the adapter and stop it from + * continuing any further communication on the ports or DMA to the + * host. This is typically used when the adapter and/or firmware + * have crashed and we want to prevent any further accidental + * communication with the rest of the world. This will also force + * the port Link Status to go down -- if register writes work -- + * which should help our peers figure out that we're down. + */ +int t4_shutdown_adapter(struct adapter *adapter) +{ + int port; + + t4_intr_disable(adapter); + t4_write_reg(adapter, DBG_GPIO_EN_A, 0); + for_each_port(adapter, port) { + u32 a_port_cfg = PORT_REG(port, + is_t4(adapter->params.chip) + ? XGMAC_PORT_CFG_A + : MAC_PORT_CFG_A); + + t4_write_reg(adapter, a_port_cfg, + t4_read_reg(adapter, a_port_cfg) + & ~SIGNAL_DET_V(1)); + } + t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0); + + return 0; +} + +/** * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID @@ -7686,6 +7802,13 @@ int t4_init_tp_params(struct adapter *adap) &adap->params.tp.ingress_config, 1, TP_INGRESS_CONFIG_A); } + /* For T6, cache the adapter's compressed error vector + * and passing outer header info for encapsulated packets. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + v = t4_read_reg(adap, TP_OUT_CONFIG_A); + adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 
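The pause-frame corrections in t4_get_port_stats() are plain arithmetic once you know an Ethernet PAUSE frame is 64 bytes on the wire: when the MPS_STAT_CTL bits say pause frames were folded into the aggregate counters, they are backed out so the frame/octet numbers reflect data traffic only. With illustrative numbers:

	#include <stdint.h>

	struct pstats_sketch {
		uint64_t frames, octets, pause;
	};

	static void back_out_pause(struct pstats_sketch *p)
	{
		p->frames -= p->pause;		/* 1000000 - 500  = 999500 */
		p->octets -= p->pause * 64;	/* minus 500 * 64 = 32000 bytes */
	}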
1 : 0; + } /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a267173f5997..5043b64805f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1175,6 +1175,21 @@ struct cpl_rx_pkt { #define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S) #define RXERR_CSUM_F RXERR_CSUM_V(1U) +#define T6_COMPR_RXERR_LEN_S 1 +#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_LEN_F T6_COMPR_RXERR_LEN_V(1U) + +#define T6_COMPR_RXERR_VEC_S 0 +#define T6_COMPR_RXERR_VEC_M 0x3F +#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_LEN_S) +#define T6_COMPR_RXERR_VEC_G(x) \ + (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M) + +/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */ +#define T6_COMPR_RXERR_SUM_S 4 +#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S) +#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U) + struct cpl_trace_pkt { u8 opcode; u8 intf; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 9fea255c7e87..3348d33c36fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -855,6 +855,14 @@ #define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S) #define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U) +#define DBG_GPIO_EN_A 0x6010 +#define XGMAC_PORT_CFG_A 0x1000 +#define MAC_PORT_CFG_A 0x800 + +#define SIGNAL_DET_S 14 +#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S) +#define SIGNAL_DET_F SIGNAL_DET_V(1U) + #define MC_ECC_STATUS_A 0x751c #define MC_P_ECC_STATUS_A 0x4131c @@ -1276,6 +1284,10 @@ #define DBGLARPTR_M 0x7fU #define DBGLARPTR_V(x) ((x) << DBGLARPTR_S) +#define CRXPKTENC_S 3 +#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S) +#define CRXPKTENC_F CRXPKTENC_V(1U) + #define TP_DBG_LA_DATAL_A 0x7ed8 #define TP_DBG_LA_CONFIG_A 0x7ed4 #define TP_OUT_CONFIG_A 0x7d04 @@ -1794,12 +1806,29 @@ #define MPS_CMN_CTL_A 0x9000 +#define COUNTPAUSEMCRX_S 5 +#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S) +#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U) + +#define COUNTPAUSESTATRX_S 4 +#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S) +#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U) + +#define COUNTPAUSEMCTX_S 3 +#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S) +#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U) + +#define COUNTPAUSESTATTX_S 2 +#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S) +#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U) + #define NUMPORTS_S 0 #define NUMPORTS_M 0x3U #define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M) #define MPS_INT_CAUSE_A 0x9008 #define MPS_TX_INT_CAUSE_A 0x9408 +#define MPS_STAT_CTL_A 0x9600 #define FRMERR_S 15 #define FRMERR_V(x) ((x) << FRMERR_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 2accab386323..5fdaa16426c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0F -#define T4FW_VERSION_MICRO 0x25 +#define T4FW_VERSION_MINOR 0x10 +#define T4FW_VERSION_MICRO 0x1A #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define 
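The T6_COMPR_RXERR_* additions follow the driver's usual _S/_M/_V/_G idiom: _S is the bit shift, _M the field mask applied after shifting, _V(x) builds a field value, and _G(x) extracts one. (Note that T6_COMPR_RXERR_VEC_V as added shifts by T6_COMPR_RXERR_LEN_S, which looks like a copy-paste slip; only the _G form is used by t4_ethrx_handler.) A worked example of the decode that feeds csum_ok, with locally abbreviated names:

	#include <stdint.h>

	#define VEC_S	 0
	#define VEC_M	 0x3F
	#define VEC_G(x) (((x) >> VEC_S) & VEC_M)
	#define SUM_F	 (1U << 4)  /* OR of RX_ERROR_CSUM / RX_ERROR_CSIP */

	/* rx_pkt_encap caches TP_OUT_CONFIG.CRXPKTENC at init (T6 only) */
	static inline uint16_t rx_err_vec(uint16_t raw, int rx_pkt_encap)
	{
		return rx_pkt_encap ? VEC_G(raw) : raw;
	}

	/* e.g. raw = 0x0010 on T6: VEC_G() keeps the 6-bit code 0x10, and
	 * (0x10 & SUM_F) != 0 flags a checksum error, so csum_ok is false */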
T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0F -#define T5FW_VERSION_MICRO 0x25 +#define T5FW_VERSION_MINOR 0x10 +#define T5FW_VERSION_MICRO 0x1A #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0F -#define T6FW_VERSION_MICRO 0x25 +#define T6FW_VERSION_MINOR 0x10 +#define T6FW_VERSION_MICRO 0x1A #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0d1a134c8174..ac7a150c54e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -158,20 +158,23 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) netif_carrier_on(dev); switch (pi->link_cfg.speed) { - case 40000: - s = "40Gbps"; + case 100: + s = "100Mbps"; + break; + case 1000: + s = "1Gbps"; break; - case 10000: s = "10Gbps"; break; - - case 1000: - s = "1000Mbps"; + case 25000: + s = "25Gbps"; break; - - case 100: - s = "100Mbps"; + case 40000: + s = "40Gbps"; + break; + case 100000: + s = "100Gbps"; break; default: diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f3ed9ce99e5e..e37dde2ba97f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) u32 val; if (likely(work_done < budget)) { - napi_complete(napi); + napi_complete_done(napi, work_done); intr_params = rspq->next_intr_params; rspq->next_intr_params = rspq->intr_params; } else diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 396c88678eab..7a7c02f1f8b9 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d pr_info("mdio write timed out\n"); } -static int ep93xx_rx(struct net_device *dev, int processed, int budget) +static int ep93xx_rx(struct net_device *dev, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); + int processed = 0; while (processed < budget) { int entry; @@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget) skb_put(skb, length); skb->protocol = eth_type_trans(skb, dev); - netif_receive_skb(skb); + napi_gro_receive(&ep->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += length; @@ -310,35 +311,17 @@ err: return processed; } -static int ep93xx_have_more_rx(struct ep93xx_priv *ep) -{ - struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; - return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); -} - static int ep93xx_poll(struct napi_struct *napi, int budget) { struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); struct net_device *dev = ep->dev; - int rx = 0; - -poll_some_more: - rx = ep93xx_rx(dev, rx, budget); - if (rx < budget) { - int more = 0; + int rx; + rx = ep93xx_rx(dev, budget); + if (rx < budget && napi_complete_done(napi, rx)) { spin_lock_irq(&ep->rx_lock); - __napi_complete(napi); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); - if (ep93xx_have_more_rx(ep)) { - wrl(ep, REG_INTEN, REG_INTEN_TX); - wrl(ep, REG_INTSTSP, REG_INTSTS_RX); - more = 1; - } spin_unlock_irq(&ep->rx_lock); - - if (more && 
napi_reschedule(napi)) - goto poll_some_more; } if (rx) {
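/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * The change repeated across cxgb4vf, ep93xx and most of the drivers
 * below is napi_complete(napi) -> napi_complete_done(napi, work_done).
 * Reporting how much work was actually done lets the core decide to
 * keep the instance in polled mode (busy polling, gro_flush_timeout)
 * instead of blindly re-arming the interrupt, and the bool return says
 * whether polling really ended -- which is why ep93xx above can drop
 * its hand-rolled "poll_some_more" loop and __napi_complete().  A
 * minimal poll handler in the converted style (mydrv_* names are
 * invented for the sketch):
 */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
        struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
        int work_done = mydrv_rx(priv, budget);        /* processes <= budget */

        /* Re-arm the device IRQ only if the core agrees polling is over. */
        if (work_done < budget && napi_complete_done(napi, work_done))
                mydrv_enable_rx_irq(priv);

        return work_done;
}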
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index cdd7a1a59aa7..c009f6ddabf7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -43,10 +43,8 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#endif #include <linux/crash_dump.h> +#include <net/busy_poll.h> #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -680,8 +678,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, } /* dev_base_lock rwlock held, nominally process context */ -static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +static void enic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; @@ -693,7 +691,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, * recorded stats. */ if (err == -ENOMEM) - return net_stats; + return; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -707,8 +705,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, net_stats->rx_over_errors = enic->rq_truncated_pkts; net_stats->rx_crc_errors = enic->rq_bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; - - return net_stats; } static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) @@ -1193,8 +1189,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); skb_mark_napi_id(skb, &enic->napi[rq->index]); - if (enic_poll_busy_polling(rq) || - !(netdev->features & NETIF_F_GRO)) + if (!(netdev->features & NETIF_F_GRO)) netif_receive_skb(skb); else napi_gro_receive(&enic->napi[q_number], skb); @@ -1298,15 +1293,6 @@ static int enic_poll(struct napi_struct *napi, int budget) wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, enic_wq_service, NULL); - if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { - if (wq_work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - wq_work_done, - 0 /* dont unmask intr */, - 0 /* dont reset intr timer */); - return budget; - } - if (budget > 0) rq_work_done = vnic_cq_service(&enic->cq[cq_rq], rq_work_to_do, enic_rq_service, NULL); @@ -1325,7 +1311,6 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - enic_poll_unlock_napi(&enic->rq[cq_rq], napi); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1345,7 +1330,7 @@ static int enic_poll(struct napi_struct *napi, int budget) * exit polling */ - napi_complete(napi); + napi_complete_done(napi, rq_work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); @@ -1392,34 +1377,6 @@ static void enic_set_rx_cpu_rmap(struct enic *enic) #endif /* CONFIG_RFS_ACCEL */ -#ifdef CONFIG_NET_RX_BUSY_POLL -static int enic_busy_poll(struct napi_struct *napi) -{ - struct net_device *netdev = napi->dev; - struct enic *enic = netdev_priv(netdev); - unsigned int rq = (napi - &enic->napi[0]); - unsigned int cq = enic_cq_rq(enic, rq); - unsigned int intr = enic_msix_rq_intr(enic, rq); - unsigned int work_to_do = -1; /* clean all pkts possible */ - unsigned int work_done; - - if (!enic_poll_lock_poll(&enic->rq[rq])) - return LL_FLUSH_BUSY; - work_done = vnic_cq_service(&enic->cq[cq], work_to_do, - enic_rq_service, NULL); - - if (work_done > 0) - vnic_intr_return_credits(&enic->intr[intr], - work_done, 0, 0); - vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); - if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_poll(&enic->rq[rq]); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static int enic_poll_msix_wq(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1461,8 +1418,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) unsigned int work_done = 0; int err; - if (!enic_poll_lock_napi(&enic->rq[rq])) - return budget; /* Service RQ */ @@ -1495,14 +1450,13 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_napi(&enic->rq[rq], napi); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, * exit polling */ - napi_complete(napi); + napi_complete_done(napi, work_done); if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); @@ -1753,10 +1707,9 @@ static int enic_open(struct net_device *netdev) netif_tx_wake_all_queues(netdev); - for (i = 0; i < enic->rq_count; i++) { - enic_busy_poll_init_lock(&enic->rq[i]); + for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); - } + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) for (i = 0; i < enic->wq_count; i++) napi_enable(&enic->napi[enic_cq_wq(enic, i)]); @@ -1800,13 +1753,8 @@ static int enic_stop(struct net_device *netdev) enic_dev_disable(enic); - for (i = 0; i < enic->rq_count; i++) { + for (i = 0; i < enic->rq_count; i++) napi_disable(&enic->napi[i]); - local_bh_disable(); - while (!enic_poll_lock_napi(&enic->rq[i])) - mdelay(1); - local_bh_enable(); - } netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -2337,9 +2285,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif }; static const struct net_device_ops enic_netdev_ops = { @@ -2363,9 +2308,6 @@ static const struct net_device_ops enic_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = enic_busy_poll, -#endif }; static void enic_dev_deinit(struct enic *enic)
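/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * enic implemented busy polling by hand: an atomic IDLE/NAPI/POLL
 * state machine in struct vnic_rq (see vnic_rq.h just below), lock
 * helpers such as enic_poll_lock_napi(), and an ndo_busy_poll hook;
 * be2net further down carries the same machinery.  Since the core can
 * now drive the driver's ->poll() directly for busy polling, all of it
 * is deleted: a converted driver only tags each skb with its NAPI id
 * and reports work through napi_complete_done().  Roughly (invented
 * names):
 */
static void mydrv_deliver(struct mydrv_priv *priv, struct sk_buff *skb)
{
        skb_record_rx_queue(skb, priv->rxq_index);
        skb_mark_napi_id(skb, &priv->napi);     /* lets busy poll find us */
        napi_gro_receive(&priv->napi, skb);
}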
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index b9c82f143d7e..0413103ebe94 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -92,9 +92,6 @@ struct vnic_rq { struct vnic_rq_buf *to_clean; void *os_buf_head; unsigned int pkts_outstanding; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t bpoll_state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) @@ -207,81 +204,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, return 0; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_NAPI); - - return (rc == ENIC_POLL_STATE_IDLE); -} - -static inline void enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); - napi_gro_flush(napi, false); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); - } - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, - ENIC_POLL_STATE_POLL); - - return (rc == ENIC_POLL_STATE_IDLE); -} - - -static inline void enic_poll_unlock_poll(struct vnic_rq *rq) -{ - WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); - atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); -} - -static inline bool enic_poll_busy_polling(struct vnic_rq *rq) -{ - return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; -} - -#else - -static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) -{ -} - -static inline bool enic_poll_lock_napi(struct vnic_rq *rq) -{ - return true; -} - -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, - struct napi_struct *napi) -{ - return false; -} - -static inline bool enic_poll_lock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) -{ - return false; -} - -static inline bool enic_poll_ll_polling(struct vnic_rq *rq) -{ - return false; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - void vnic_rq_free(struct vnic_rq *rq); int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size); diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 57c17e797ae3..127ce9707378 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -1485,95 +1485,104 @@ static void __de_get_regs(struct de_private *de, u8 *buf) de_rx_missed(de, rbuf[8]); } -static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_get_link_ksettings(struct de_private *de, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = de->media_supported; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->phy_address = 0; - ecmd->advertising = de->media_advertise; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + de->media_supported); + cmd->base.phy_address = 0; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + de->media_advertise); switch (de->media_type) { case DE_MEDIA_AUI: - ecmd->port = PORT_AUI; + cmd->base.port = PORT_AUI; break; case DE_MEDIA_BNC: - ecmd->port = PORT_BNC; + cmd->base.port = PORT_BNC; break; default: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; } - ethtool_cmd_speed_set(ecmd, 10); +
cmd->base.speed = 10; if (dr32(MacMode) & FullDuplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (de->media_lock) - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; else - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; /* ignore maxtxpkt, maxrxpkt for now */ return 0; } -static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) +static int __de_set_link_ksettings(struct de_private *de, + const struct ethtool_link_ksettings *cmd) { u32 new_media; unsigned int media_lock; + u8 duplex = cmd->base.duplex; + u8 port = cmd->base.port; + u8 autoneg = cmd->base.autoneg; + u32 advertising; - if (ethtool_cmd_speed(ecmd) != 10) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.speed != 10) return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; - if (de->de21040 && ecmd->port == PORT_BNC) + if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC) return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) + if (de->de21040 && port == PORT_BNC) return -EINVAL; - if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE) return -EINVAL; - if (ecmd->advertising & ~de->media_supported) + if (advertising & ~de->media_supported) return -EINVAL; - if (ecmd->autoneg == AUTONEG_ENABLE && - (!(ecmd->advertising & ADVERTISED_Autoneg))) + if (autoneg == AUTONEG_ENABLE && + (!(advertising & ADVERTISED_Autoneg))) return -EINVAL; - switch (ecmd->port) { + switch (port) { case PORT_AUI: new_media = DE_MEDIA_AUI; - if (!(ecmd->advertising & ADVERTISED_AUI)) + if (!(advertising & ADVERTISED_AUI)) return -EINVAL; break; case PORT_BNC: new_media = DE_MEDIA_BNC; - if (!(ecmd->advertising & ADVERTISED_BNC)) + if (!(advertising & ADVERTISED_BNC)) return -EINVAL; break; default: - if (ecmd->autoneg == AUTONEG_ENABLE) + if (autoneg == AUTONEG_ENABLE) new_media = DE_MEDIA_TP_AUTO; - else if (ecmd->duplex == DUPLEX_FULL) + else if (duplex == DUPLEX_FULL) new_media = DE_MEDIA_TP_FD; else new_media = DE_MEDIA_TP; - if (!(ecmd->advertising & ADVERTISED_TP)) + if (!(advertising & ADVERTISED_TP)) return -EINVAL; - if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) + if (!(advertising & (ADVERTISED_10baseT_Full | + ADVERTISED_10baseT_Half))) return -EINVAL; break; } - media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; + media_lock = (autoneg == AUTONEG_ENABLE) ? 
0 : 1; if ((new_media == de->media_type) && (media_lock == de->media_lock) && - (ecmd->advertising == de->media_advertise) + (advertising == de->media_advertise)) return 0; /* nothing to change */ de_link_down(de); @@ -1582,7 +1591,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) de->media_type = new_media; de->media_lock = media_lock; - de->media_advertise = ecmd->advertising; + de->media_advertise = advertising; de_set_media(de); if (netif_running(de->dev)) de_start_rxtx(de); @@ -1604,25 +1613,27 @@ static int de_get_regs_len(struct net_device *dev) return DE_REGS_SIZE; } -static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_get_settings(de, ecmd); + rc = __de_get_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; } -static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int de_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); - rc = __de_set_settings(de, ecmd); + rc = __de_set_link_ksettings(de, cmd); spin_unlock_irq(&de->lock); return rc; @@ -1690,13 +1701,13 @@ static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, - .get_settings = de_get_settings, - .set_settings = de_set_settings, .get_msglevel = de_get_msglevel, .set_msglevel = de_set_msglevel, .get_eeprom = de_get_eeprom, .nway_reset = de_nway_reset, .get_regs = de_get_regs, + .get_link_ksettings = de_get_link_ksettings, + .set_link_ksettings = de_set_link_ksettings, }; static void de21040_get_mac_address(struct de_private *de)
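/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * The ethtool conversions in this merge all follow one pattern: the
 * legacy get_settings/set_settings ops (struct ethtool_cmd, u32 masks,
 * the now-dropped "transceiver" field) become get_link_ksettings/
 * set_link_ksettings on struct ethtool_link_ksettings, which stores
 * link modes as bitmaps.  Drivers that keep legacy u32 masks, like
 * de2104x above, translate at the boundary; MII-library and phylib
 * drivers below simply switch to the mii_ethtool_get_link_ksettings()
 * or phy_ethtool_ksettings_get() helpers.  Open-coded form (mydrv_*
 * names invented):
 */
static int mydrv_get_link_ksettings(struct net_device *dev,
                                    struct ethtool_link_ksettings *cmd)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        cmd->base.speed = priv->speed;          /* e.g. SPEED_10 */
        cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        cmd->base.port = PORT_TP;
        cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

        /* translate the legacy u32 SUPPORTED_* and ADVERTISED_* masks */
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                priv->supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                priv->advertising);
        return 0;
}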
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c index 92306b320840..ba6ae24acf62 100644 --- a/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/drivers/net/ethernet/dec/tulip/interrupt.c @@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget) /* Remove us from polling list and enable RX intr. */ - napi_complete(napi); - iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + napi_complete_done(napi, work_done); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: * 1. it can race with disabling irqs in irq handler @@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget) * before we did napi_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - napi_complete(napi); + napi_complete_done(napi, work_done); return work_done; } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index f82ebe5d89ee..8d98b259d1ba 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -926,48 +926,53 @@ static void uli526x_set_filter_mode(struct net_device * dev) } static void -ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) +ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db, + struct ethtool_link_ksettings *cmd) { - ecmd->supported = (SUPPORTED_10baseT_Half | + u32 supported, advertising; + + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII); - ecmd->advertising = (ADVERTISED_10baseT_Half | + advertising = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->port = PORT_MII; - ecmd->phy_address = db->phy_addr; - - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_MII; + cmd->base.phy_address = db->phy_addr; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->duplex = DUPLEX_HALF; + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) { - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; } if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) { - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } if(db->link_failed) { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } if (db->media_mode & ULI526X_AUTO) { - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; } } @@ -981,10 +986,12 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ struct uli526x_board_info *np = netdev_priv(dev); - ULi_ethtool_gset(np, cmd); + ULi_ethtool_get_link_ksettings(np, cmd); return 0; } @@ -1006,9 +1013,9 @@ static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = netdev_get_link, .get_wol = uli526x_get_wol, + .get_link_ksettings = netdev_get_link_ksettings, }; /* diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index bc9bf88e5831..d1f2f3cc7cfa 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1391,25 +1391,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private
*np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1439,12 +1441,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 8c95a8a81e3c..1e350135f11d 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1256,52 +1256,63 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } -static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 supported, advertising; + if (np->phy_media) { /* fiber device */ - cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_INTERNAL; + supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; + advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; } else { /* copper device */ - cmd->supported = SUPPORTED_10baseT_Half | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; - cmd->advertising = ADVERTISED_10baseT_Half | + advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| + ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII; - cmd->port = PORT_MII; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.port = PORT_MII; } - if ( np->link_status ) { - ethtool_cmd_speed_set(cmd, np->speed); - cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + if (np->link_status) { + cmd->base.speed = np->speed; + cmd->base.duplex = np->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - if ( np->an_enable) - cmd->autoneg = AUTONEG_ENABLE; + if (np->an_enable) + cmd->base.autoneg = AUTONEG_ENABLE; else - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; + + cmd->base.phy_address = np->phy_addr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - cmd->phy_address = np->phy_addr; return 0; } -static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rio_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); + u32 speed = cmd->base.speed; + u8 duplex = cmd->base.duplex; + netif_carrier_off(dev); - if (cmd->autoneg == AUTONEG_ENABLE) { - if (np->an_enable) + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (np->an_enable) { return 0; - else { + } else { np->an_enable = 1; mii_set_media(dev); return 0; @@ -1309,18 +1320,18 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { np->an_enable = 0; if (np->speed == 1000) { - ethtool_cmd_speed_set(cmd, SPEED_100); - cmd->duplex = DUPLEX_FULL; + speed = SPEED_100; + duplex = DUPLEX_FULL; printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); } - switch (ethtool_cmd_speed(cmd)) { + switch (speed) { case SPEED_10: np->speed = 10; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_100: np->speed = 100; - np->full_duplex = (cmd->duplex == DUPLEX_FULL); + np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_1000: /* not supported */ default: @@ -1339,9 +1350,9 @@ static u32 rio_get_link(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = rio_get_drvinfo, - .get_settings = rio_get_settings, - .set_settings = rio_set_settings, .get_link = rio_get_link, + .get_link_ksettings = rio_get_link_ksettings, + .set_link_ksettings = rio_set_link_ksettings, }; static int diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 2e5b66762e15..2704bcf023be 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1664,21 +1664,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1800,8 +1802,6 @@ static int sundance_set_wol(struct net_device *dev, static const struct ethtool_ops 
ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, - .get_settings = get_settings, - .set_settings = set_settings, .nway_reset = nway_reset, .get_link = get_link, .get_wol = sundance_get_wol, @@ -1811,6 +1811,8 @@ static const struct ethtool_ops ethtool_ops = { .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_ethtool_stats, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 2a17c59f69f9..3e77dd863175 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget) /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts. */ - napi_complete(napi); + napi_complete_done(napi, npackets); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 7bf78a0d322c..278f139f2a22 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -457,7 +457,7 @@ static int ec_bhf_stop(struct net_device *net_dev) return 0; } -static struct rtnl_link_stats64 * +static void ec_bhf_get_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { @@ -472,8 +472,6 @@ ec_bhf_get_stats(struct net_device *net_dev, stats->tx_bytes = priv->stat_tx_bytes; stats->rx_bytes = priv->stat_rx_bytes; - - return stats; } static const struct net_device_ops ec_bhf_netdev_ops = { diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4c30c44b242e..d49528ad7821 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -226,11 +226,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ u64 tx_reqs_prev; /* Used to calculate TX pps */ }; -enum { - NAPI_POLLING, - BUSY_POLLING -}; - struct be_mcc_obj { struct be_queue_info q; struct be_queue_info cq;
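/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * Second recurring conversion: ndo_get_stats64 no longer returns the
 * stats pointer it was handed (every implementation just returned its
 * own argument), so the "return stats;" tails disappear -- enic and
 * ec_bhf above, dpaa, ehea and hns below.  The new shape (invented
 * mydrv_* names):
 */
static void mydrv_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *stats)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        /* fill in what the device tracks; the caller owns the buffer */
        stats->rx_packets = priv->rx_packets;
        stats->rx_bytes = priv->rx_bytes;
        stats->tx_packets = priv->tx_packets;
        stats->tx_bytes = priv->tx_bytes;
}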
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 0a48a31225e6..7d1819c9e8cc 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -606,7 +606,8 @@ bool be_pause_supported(struct be_adapter *adapter) false : true; } -static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int be_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct be_adapter *adapter = netdev_priv(netdev); u8 link_status; @@ -614,13 +615,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) int status; u32 auto_speeds; u32 fixed_speeds; + u32 supported = 0, advertising = 0; if (adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); - ethtool_cmd_speed_set(ecmd, link_speed); + cmd->base.speed = link_speed; status = be_cmd_get_phy_info(adapter); if (!status) { @@ -629,58 +631,51 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) be_cmd_query_cable_type(adapter); - ecmd->supported = + supported = convert_to_et_setting(adapter, auto_speeds | fixed_speeds); - ecmd->advertising = convert_to_et_setting(adapter, auto_speeds); - ecmd->port = be_get_port_type(adapter); + cmd->base.port = be_get_port_type(adapter); if (adapter->phy.auto_speeds_supported) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + advertising |= ADVERTISED_Autoneg; } - ecmd->supported |= SUPPORTED_Pause; + supported |= SUPPORTED_Pause; if (be_pause_supported(adapter)) - ecmd->advertising |= ADVERTISED_Pause; - - switch (adapter->phy.interface_type) { - case PHY_TYPE_KR_10GB: - case PHY_TYPE_KX4_10GB: - ecmd->transceiver = XCVR_INTERNAL; - break; - default: - ecmd->transceiver = XCVR_EXTERNAL; - break; - } + advertising |= ADVERTISED_Pause; } else { - ecmd->port = PORT_OTHER; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; + cmd->base.port = PORT_OTHER; + cmd->base.autoneg = AUTONEG_DISABLE; } /* Save for future use */ - adapter->phy.link_speed = ethtool_cmd_speed(ecmd); - adapter->phy.port_type = ecmd->port; - adapter->phy.transceiver = ecmd->transceiver; - adapter->phy.autoneg = ecmd->autoneg; - adapter->phy.advertising = ecmd->advertising; - adapter->phy.supported = ecmd->supported; + adapter->phy.link_speed = cmd->base.speed; + adapter->phy.port_type = cmd->base.port; + adapter->phy.autoneg = cmd->base.autoneg; + adapter->phy.advertising = advertising; + adapter->phy.supported = supported; } else { - ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); - ecmd->port = adapter->phy.port_type; - ecmd->transceiver = adapter->phy.transceiver; - ecmd->autoneg = adapter->phy.autoneg; - ecmd->advertising = adapter->phy.advertising; - ecmd->supported = adapter->phy.supported; + cmd->base.speed = adapter->phy.link_speed; + cmd->base.port = adapter->phy.port_type; + cmd->base.autoneg = adapter->phy.autoneg; + advertising = adapter->phy.advertising; + supported = adapter->phy.supported; } - ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN; - ecmd->phy_address = adapter->port_num; + cmd->base.duplex = netif_carrier_ok(netdev) ?
+ DUPLEX_FULL : DUPLEX_UNKNOWN; + cmd->base.phy_address = adapter->port_num; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } @@ -1399,7 +1394,6 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags) } const struct ethtool_ops be_ethtool_ops = { - .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, .get_wol = be_get_wol, .set_wol = be_set_wol, @@ -1433,5 +1427,6 @@ const struct ethtool_ops be_ethtool_ops = { .get_channels = be_get_channels, .set_channels = be_set_channels, .get_module_info = be_get_module_info, - .get_module_eeprom = be_get_module_eeprom + .get_module_eeprom = be_get_module_eeprom, + .get_link_ksettings = be_get_link_ksettings, }; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index cd49a54c538d..6be3b9aba8ed 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -647,8 +647,8 @@ void be_parse_stats(struct be_adapter *adapter) } } -static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void be_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct be_adapter *adapter = netdev_priv(netdev); struct be_drv_stats *drvs = &adapter->drv_stats; @@ -712,7 +712,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + drvs->rx_input_fifo_overflow_drop + drvs->rx_drops_no_pbuf; - return stats; } void be_link_status_update(struct be_adapter *adapter, u8 link_status) @@ -3064,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp) } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, - int budget, int polling) + int budget) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; @@ -3096,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, goto loop_continue; } - /* Don't do gro when we're busy_polling */ - if (do_gro(rxcp) && polling != BUSY_POLLING) + if (do_gro(rxcp)) be_rx_compl_process_gro(rxo, napi, rxcp); else be_rx_compl_process(rxo, napi, rxcp); @@ -3195,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, } } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock(&eqo->lock); /* BH is already disabled */ - if (eqo->state & BE_EQ_LOCKED) { - WARN_ON(eqo->state & BE_EQ_NAPI); - eqo->state |= BE_EQ_NAPI_YIELD; - status = false; - } else { - eqo->state = BE_EQ_NAPI; - } - spin_unlock(&eqo->lock); - return status; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ - spin_lock(&eqo->lock); /* BH is already disabled */ - - WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); - eqo->state = BE_EQ_IDLE; - - spin_unlock(&eqo->lock); -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - bool status = true; - - spin_lock_bh(&eqo->lock); - if (eqo->state & BE_EQ_LOCKED) { - eqo->state |= BE_EQ_POLL_YIELD; - status = false; - } else { - eqo->state |= BE_EQ_POLL; - } - spin_unlock_bh(&eqo->lock); - return status; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_bh(&eqo->lock); - - WARN_ON(eqo->state & (BE_EQ_NAPI)); - eqo->state = BE_EQ_IDLE; - - spin_unlock_bh(&eqo->lock); -} - 
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ - spin_lock_init(&eqo->lock); - eqo->state = BE_EQ_IDLE; -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ - local_bh_disable(); - - /* It's enough to just acquire napi lock on the eqo to stop - * be_busy_poll() from processing any queueus. - */ - while (!be_lock_napi(eqo)) - mdelay(1); - - local_bh_enable(); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline bool be_lock_napi(struct be_eq_obj *eqo) -{ - return true; -} - -static inline void be_unlock_napi(struct be_eq_obj *eqo) -{ -} - -static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) -{ - return false; -} - -static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_enable_busy_poll(struct be_eq_obj *eqo) -{ -} - -static inline void be_disable_busy_poll(struct be_eq_obj *eqo) -{ -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - int be_poll(struct napi_struct *napi, int budget) { struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); @@ -3309,25 +3207,20 @@ int be_poll(struct napi_struct *napi, int budget) for_all_tx_queues_on_eq(adapter, eqo, txo, i) be_process_tx(adapter, txo, i); - if (be_lock_napi(eqo)) { - /* This loop will iterate twice for EQ0 in which - * completions of the last RXQ (default one) are also processed - * For other EQs the loop iterates only once - */ - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, budget, NAPI_POLLING); - max_work = max(work, max_work); - } - be_unlock_napi(eqo); - } else { - max_work = budget; + /* This loop will iterate twice for EQ0 in which + * completions of the last RXQ (default one) are also processed + * For other EQs the loop iterates only once + */ + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, budget); + max_work = max(work, max_work); } if (is_mcc_eqo(eqo)) be_process_mcc(adapter); if (max_work < budget) { - napi_complete(napi); + napi_complete_done(napi, max_work); /* Skyhawk EQ_DB has a provision to set the rearm to interrupt * delay via a delay multiplier encoding value @@ -3344,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget) return max_work; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int be_busy_poll(struct napi_struct *napi) -{ - struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); - struct be_adapter *adapter = eqo->adapter; - struct be_rx_obj *rxo; - int i, work = 0; - - if (!be_lock_busy_poll(eqo)) - return LL_FLUSH_BUSY; - - for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { - work = be_process_rx(rxo, napi, 4, BUSY_POLLING); - if (work) - break; - } - - be_unlock_busy_poll(eqo); - return work; -} -#endif - void be_detect_error(struct be_adapter *adapter) { u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; @@ -3670,7 +3541,6 @@ static int be_close(struct net_device *netdev) if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); - be_disable_busy_poll(eqo); } adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; } @@ -3840,7 +3710,6 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); - be_enable_busy_poll(eqo); be_eq_notify(adapter, eqo->q.id, true, true, 0, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -5246,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = { #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = 
be_busy_poll, -#endif .ndo_udp_tunnel_add = be_add_vxlan_port, .ndo_udp_tunnel_del = be_del_vxlan_port, .ndo_features_check = be_features_check, diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 45abc81f6f55..f18aba05f1c2 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -180,8 +180,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region - * @dma_alloc: dma allocated buffer size - * @io_region_size: I/O memory region size * @num_bd: number of buffer descriptors * @num_tx: number of send buffers * @cur_tx: last send buffer written @@ -199,8 +197,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); struct ethoc { void __iomem *iobase; void __iomem *membase; - int dma_alloc; - resource_size_t io_region_size; bool big_endian; unsigned int num_bd; @@ -618,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget) tx_work_done = ethoc_tx(priv->netdev, budget); if (rx_work_done < budget && tx_work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_work_done); ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); } @@ -999,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev, return 0; } -const struct ethtool_ops ethoc_ethtool_ops = { +static const struct ethtool_ops ethoc_ethtool_ops = { .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .nway_reset = phy_ethtool_nway_reset, @@ -1096,8 +1092,6 @@ static int ethoc_probe(struct platform_device *pdev) /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; - priv->dma_alloc = 0; - priv->io_region_size = resource_size(mmio); priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); @@ -1127,7 +1121,6 @@ static int ethoc_probe(struct platform_device *pdev) goto free; } netdev->mem_end = netdev->mem_start + buffer_size; - priv->dma_alloc = buffer_size; } priv->big_endian = pdata ? 
pdata->big_endian : diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 223f35cc034c..992ebe973d25 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) if (work_done < budget) { u32 buf_int_enable_value = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index dce5f7b7f772..c0ddbbe6c226 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -825,16 +825,18 @@ static void ftmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_gset(&priv->mii, cmd); + return mii_ethtool_get_link_ksettings(&priv->mii, cmd); } -static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int ftmac100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ftmac100 *priv = netdev_priv(netdev); - return mii_ethtool_sset(&priv->mii, cmd); + return mii_ethtool_set_link_ksettings(&priv->mii, cmd); } static int ftmac100_nway_reset(struct net_device *netdev) @@ -850,11 +852,11 @@ static u32 ftmac100_get_link(struct net_device *netdev) } static const struct ethtool_ops ftmac100_ethtool_ops = { - .set_settings = ftmac100_set_settings, - .get_settings = ftmac100_get_settings, .get_drvinfo = ftmac100_get_drvinfo, .nway_reset = ftmac100_nway_reset, .get_link = ftmac100_get_link, + .get_link_ksettings = ftmac100_get_link_ksettings, + .set_link_ksettings = ftmac100_set_link_ksettings, }; /****************************************************************************** diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 9cb436cb3745..766636a7c25e 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1817,25 +1817,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1865,12 +1867,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = 
netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index c9b7ad65e563..25a14a3fe784 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -313,8 +313,8 @@ static void dpaa_tx_timeout(struct net_device *net_dev) /* Calculates the statistics for the given device by adding the statistics * collected by each CPU. */ -static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *s) +static void dpaa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *s) { int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64); struct dpaa_priv *priv = netdev_priv(net_dev); @@ -332,8 +332,6 @@ static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev, for (j = 0; j < numstats; j++) netstats[j] += cpustats[j]; } - - return s; } static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) @@ -2003,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { - napi_complete(napi); + napi_complete_done(napi, cleaned); qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } else if (np->down) {
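/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * For drivers that delegate link handling to phylib, such as dpaa just
 * below, the ksettings conversion is only a swap of helpers:
 * phy_ethtool_gset/phy_ethtool_sset on struct ethtool_cmd become
 * phy_ethtool_ksettings_get/phy_ethtool_ksettings_set on struct
 * ethtool_link_ksettings.  Sketch (mydrv_* invented):
 */
static int mydrv_get_link_ksettings(struct net_device *dev,
                                    struct ethtool_link_ksettings *cmd)
{
        if (!dev->phydev)
                return -ENODEV;

        return phy_ethtool_ksettings_get(dev->phydev, cmd);
}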
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 27e7044667d1..15571e251fb9 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -72,8 +72,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = { #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu) #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global) -static int dpaa_get_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { int err; @@ -82,13 +82,13 @@ static int dpaa_get_settings(struct net_device *net_dev, return 0; } - err = phy_ethtool_gset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_get(net_dev->phydev, cmd); return err; } -static int dpaa_set_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int dpaa_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { int err; @@ -97,9 +97,9 @@ static int dpaa_set_settings(struct net_device *net_dev, return -ENODEV; } - err = phy_ethtool_sset(net_dev->phydev, et_cmd); + err = phy_ethtool_ksettings_set(net_dev->phydev, cmd); if (err < 0) - netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err); + netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err); return err; } @@ -402,8 +402,6 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset, } const struct ethtool_ops dpaa_ethtool_ops = { - .get_settings = dpaa_get_settings, - .set_settings = dpaa_set_settings, .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, .set_msglevel = dpaa_set_msglevel, @@ -414,4 +412,6 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_sset_count = dpaa_get_sset_count, .get_ethtool_stats = dpaa_get_ethtool_stats, .get_strings = dpaa_get_strings, + .get_link_ksettings = dpaa_get_link_ksettings, + .set_link_ksettings = dpaa_set_link_ksettings, }; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 38160c2bebcb..2cc552ddd8f0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) fec_enet_tx(ndev); if (pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, pkts); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } return pkts; } diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 1f98838f32b7..54e3ce9bd94c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (received < budget && tx_left) { /* done */ - napi_complete(napi); + napi_complete_done(napi, received); (*fep->ops->napi_enable)(dev); return received; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 957bfc220978..0ff166ec3e7e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) if (work_done < budget) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); @@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget) if (!num_act_queues) { u32 imask; - napi_complete(napi); + napi_complete_done(napi, work_done); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 9d660888510f..3f7ae9f64cd8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) howmany += ucc_geth_rx(ugeth, i, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 97b184774784..6e50ec82b3d8 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -555,7 +555,7 @@ refill: priv->reg_inten |= RCV_INT; writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); } - napi_complete(napi); + napi_complete_done(napi, rx); done: /* clean up tx descriptors and start a new timer if necessary */ tx_remaining = hip04_tx_reclaim(ndev, false); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 979852d56f31..2c2808830e95 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hisi_femac_irq_enable(priv, DEF_INT_MASK & (~IRQ_INT_TX_PER_PACKET)); }
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 418ca1f3774a..25a6c8722eca 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget) } while (ints & DEF_INT_MASK); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); hix5hd2_irq_enable(priv); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 672b64606321..fefe371e5907 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, skb->protocol = eth_type_trans(skb, ndev); (void)napi_gro_receive(&ring_data->napi, skb); - ndev->last_rx = jiffies; } static int hns_desc_unused(struct hnae_ring *ring) @@ -1203,43 +1202,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv) struct hns_nic_ring_data *rd; int i; int cpu; - cpumask_t mask; + cpumask_var_t mask; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return; /* different irq balance for 16core and 32core */ if (h->q_num == num_possible_cpus()) { for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } else { for (i = 0; i < h->q_num; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2)) { - cpumask_clear(&mask); cpu = rd->queue_index * 2; - cpumask_set_cpu(cpu, &mask); + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } for (i = h->q_num; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; if (cpu_online(rd->queue_index * 2 + 1)) { - cpumask_clear(&mask); cpu = rd->queue_index * 2 + 1; - cpumask_set_cpu(cpu, &mask); + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); (void)irq_set_affinity_hint(rd->ring->irq, - &mask); + mask); } } } + + free_cpumask_var(mask); } static int hns_nic_init_irq(struct hns_nic_priv *priv) @@ -1625,8 +1629,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev) netdev_err(ndev, "sync uc address fail\n"); } -struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) +static void hns_nic_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { int idx = 0; u64 tx_bytes = 0; @@ -1668,8 +1672,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev, stats->tx_window_errors = ndev->stats.tx_window_errors; stats->rx_compressed = ndev->stats.rx_compressed; stats->tx_compressed = ndev->stats.tx_compressed; - - return stats; } static u16
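/*
 * [Editor's note: illustrative sketch, not part of the merged patch.]
 * The hns change above is a different fix: a cpumask_t on the kernel
 * stack can be kilobytes when CONFIG_NR_CPUS is large, so the mask
 * becomes cpumask_var_t -- with CONFIG_CPUMASK_OFFSTACK a pointer that
 * must be allocated and freed, otherwise a plain array with the
 * allocation compiled away.  Shape of the conversion (invented helper
 * name):
 */
static void mydrv_pin_irq(unsigned int irq, int cpu)
{
        cpumask_var_t mask;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;

        cpumask_clear(mask);
        cpumask_set_cpu(cpu, mask);
        (void)irq_set_affinity_hint(irq, mask);  /* as the hns code does */
        free_cpumask_var(mask);
}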
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c index 85a3866459cf..4f58d338d739 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c @@ -31,9 +31,11 @@ #include "ehea.h" #include "ehea_phyp.h" -static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); + u32 supported, advertising; u32 speed; int ret; @@ -60,68 +62,75 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { speed = -1; break; /* BUG */ } - cmd->duplex = port->full_duplex == 1 ? + cmd->base.duplex = port->full_duplex == 1 ? DUPLEX_FULL : DUPLEX_HALF; } else { speed = SPEED_UNKNOWN; - cmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(cmd, speed); + cmd->base.speed = speed; - if (cmd->speed == SPEED_10000) { - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - cmd->port = PORT_FIBRE; + if (cmd->base.speed == SPEED_10000) { + supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->base.port = PORT_FIBRE; } else { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg | SUPPORTED_TP); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_TP); - cmd->port = PORT_TP; + cmd->base.port = PORT_TP; } - cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.autoneg = port->autoneg == 1 ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ehea_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ehea_port *port = netdev_priv(dev); int ret = 0; u32 sp; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { sp = EHEA_SPEED_AUTONEG; goto doit; } - switch (cmd->speed) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10M_F; else sp = H_SPEED_10M_H; break; case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_100M_F; else sp = H_SPEED_100M_H; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_1G_F; else ret = -EINVAL; break; case SPEED_10000: - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) sp = H_SPEED_10G_F; else ret = -EINVAL; @@ -264,7 +273,6 @@ static void ehea_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ehea_ethtool_ops = { - .get_settings = ehea_get_settings, .get_drvinfo = ehea_get_drvinfo, .get_msglevel = ehea_get_msglevel, .set_msglevel = ehea_set_msglevel, @@ -272,8 +280,9 @@ static const struct ethtool_ops ehea_ethtool_ops = { .get_strings = ehea_get_strings, .get_sset_count = ehea_get_sset_count, .get_ethtool_stats = ehea_get_ethtool_stats, - .set_settings = ehea_set_settings, .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ + .get_link_ksettings = ehea_get_link_ksettings, + .set_link_ksettings = ehea_set_link_ksettings, }; void ehea_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 702446a93697..1e53d7a82675 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -328,8 +328,8 @@ out: spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); } -static struct rtnl_link_stats64
*ehea_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void ehea_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct ehea_port *port = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0; @@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, stats->multicast = port->stats.multicast; stats->rx_errors = port->stats.rx_errors; - return stats; } static void ehea_update_stats(struct work_struct *work) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5909615c27f7..6ead2335a169 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1991,69 +1991,79 @@ static struct mal_commac_ops emac_commac_sg_ops = { }; /* Ethtool support */ -static int emac_ethtool_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int emac_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); + u32 supported, advertising; - cmd->supported = dev->phy.features; - cmd->port = PORT_MII; - cmd->phy_address = dev->phy.address; - cmd->transceiver = - dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL; + supported = dev->phy.features; + cmd->base.port = PORT_MII; + cmd->base.phy_address = dev->phy.address; mutex_lock(&dev->link_lock); - cmd->advertising = dev->phy.advertising; - cmd->autoneg = dev->phy.autoneg; - cmd->speed = dev->phy.speed; - cmd->duplex = dev->phy.duplex; + advertising = dev->phy.advertising; + cmd->base.autoneg = dev->phy.autoneg; + cmd->base.speed = dev->phy.speed; + cmd->base.duplex = dev->phy.duplex; mutex_unlock(&dev->link_lock); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int emac_ethtool_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) +static int +emac_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct emac_instance *dev = netdev_priv(ndev); u32 f = dev->phy.features; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, - cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising); /* Basic sanity checks */ if (dev->phy.address < 0) return -EOPNOTSUPP; - if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0) return -EINVAL; - if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE) { - switch (cmd->speed) { + if (cmd->base.autoneg == AUTONEG_DISABLE) { + switch (cmd->base.speed) { case SPEED_10: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_10baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_10baseT_Full)) return -EINVAL; break; case SPEED_100: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && 
!(f & SUPPORTED_100baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_100baseT_Full)) return -EINVAL; break; case SPEED_1000: - if (cmd->duplex == DUPLEX_HALF && + if (cmd->base.duplex == DUPLEX_HALF && !(f & SUPPORTED_1000baseT_Half)) return -EINVAL; - if (cmd->duplex == DUPLEX_FULL && + if (cmd->base.duplex == DUPLEX_FULL && !(f & SUPPORTED_1000baseT_Full)) return -EINVAL; break; @@ -2062,8 +2072,8 @@ static int emac_ethtool_set_settings(struct net_device *ndev, } mutex_lock(&dev->link_lock); - dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, - cmd->duplex); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed, + cmd->base.duplex); mutex_unlock(&dev->link_lock); } else { @@ -2072,7 +2082,7 @@ static int emac_ethtool_set_settings(struct net_device *ndev, mutex_lock(&dev->link_lock); dev->phy.def->ops->setup_aneg(&dev->phy, - (cmd->advertising & f) | + (advertising & f) | (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))); @@ -2234,8 +2244,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, } static const struct ethtool_ops emac_ethtool_ops = { - .get_settings = emac_ethtool_get_settings, - .set_settings = emac_ethtool_set_settings, .get_drvinfo = emac_ethtool_get_drvinfo, .get_regs_len = emac_ethtool_get_regs_len, @@ -2251,6 +2259,8 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_ethtool_stats = emac_ethtool_get_ethtool_stats, .get_link = ethtool_op_get_link, + .get_link_ksettings = emac_ethtool_get_link_ksettings, + .set_link_ksettings = emac_ethtool_set_link_ksettings, }; static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index aaf6fec566b5..cd3227b088b7 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget) int n; if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) continue; - n = mc->ops->poll_rx(mc->dev, budget); + n = mc->ops->poll_rx(mc->dev, budget - received); if (n) { received += n; - budget -= n; - if (budget <= 0) - goto more_work; // XXX What if this is the last one ? 
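/*
 * Editor's note: two things change in mal_poll() at this point. The old
 * loop passed a locally decremented budget to each commac's poll_rx()
 * and bailed out through more_work once it hit zero, leaving the
 * "XXX What if this is the last one ?" question open; the new loop
 * tracks received, hands each channel only the remaining quota
 * (budget - received), and simply returns budget when the quota is
 * exhausted, which is NAPI's signal to poll again. Completion then goes
 * through napi_complete_done(napi, received), whose return value guards
 * the IRQ re-enable so interrupts stay off if the core decides to keep
 * the queue in (busy) polling mode.
 */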
+ if (received >= budget) + return budget; } } - /* We need to disable IRQs to protect from RXDE IRQ here */ - spin_lock_irqsave(&mal->lock, flags); - __napi_complete(napi); - mal_enable_eob_irq(mal); - spin_unlock_irqrestore(&mal->lock, flags); + if (napi_complete_done(napi, received)) { + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + } /* Check for "rotting" packet(s) */ list_for_each(l, &mal->poll_list) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 309f5c66083c..72ab7b6bf20b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -729,20 +729,26 @@ static int ibmveth_close(struct net_device *netdev) return 0; } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -978,11 +984,11 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev, static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, .get_link = ethtool_op_get_link, .get_strings = ibmveth_get_strings, .get_sset_count = ibmveth_get_sset_count, .get_ethtool_stats = ibmveth_get_ethtool_stats, + .get_link_ksettings = netdev_get_link_ksettings, }; static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -1320,7 +1326,7 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { - napi_complete(napi); + napi_complete_done(napi, frames_processed); /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
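 * Editor's note: the ethtool conversions in the surrounding hunks (ehea,
 * emac, ibmveth here, ibmvnic and e100 below) are all the same
 * mechanical migration: the legacy get_settings/set_settings callbacks
 * on struct ethtool_cmd give way to {get,set}_link_ksettings on struct
 * ethtool_link_ksettings, where speed/duplex/port move under cmd->base
 * and the supported/advertising masks become link-mode bitmaps built via
 * ethtool_convert_legacy_u32_to_link_mode(). A minimal sketch for a
 * fixed-link device (my_get_link_ksettings is a hypothetical name;
 * values adapted from the ibmveth hunk above):
 *
 *	static int my_get_link_ksettings(struct net_device *dev,
 *					 struct ethtool_link_ksettings *cmd)
 *	{
 *		u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE;
 *		u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE;
 *
 *		cmd->base.speed = SPEED_1000;
 *		cmd->base.duplex = DUPLEX_FULL;
 *		cmd->base.port = PORT_FIBRE;
 *		cmd->base.autoneg = AUTONEG_ENABLE;
 *
 *		ethtool_convert_legacy_u32_to_link_mode(
 *				cmd->link_modes.supported, supported);
 *		ethtool_convert_legacy_u32_to_link_mode(
 *				cmd->link_modes.advertising, advertising);
 *		return 0;
 *	}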
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c12596676bbb..c46935d4a3fe 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -987,7 +987,7 @@ restart_poll: if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - napi_complete(napi); + napi_complete_done(napi, frames_processed); if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && napi_reschedule(napi)) { disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); @@ -1025,21 +1025,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = { /* ethtool functions */ -static int ibmvnic_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int ibmvnic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + u32 supported, advertising; + + supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_FIBRE; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1132,7 +1137,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops ibmvnic_ethtool_ops = { - .get_settings = ibmvnic_get_settings, .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, .set_msglevel = ibmvnic_set_msglevel, @@ -1141,6 +1145,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, + .get_link_ksettings = ibmvnic_get_link_ksettings, }; /* Routines for managing CRQs/sCRQs */ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 25c6dfd500b4..2b7323d392dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); e100_enable_irq(nic); } @@ -2426,19 +2426,21 @@ err_clean_rx: #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ -static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); - return mii_ethtool_gset(&nic->mii, cmd); + return mii_ethtool_get_link_ksettings(&nic->mii, cmd); } -static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct nic *nic = netdev_priv(netdev); int err; mdio_write(netdev, 
nic->mii.phy_id, MII_BMCR, BMCR_RESET); - err = mii_ethtool_sset(&nic->mii, cmd); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); e100_exec_cb(nic, NULL, e100_configure); return err; @@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static const struct ethtool_ops e100_ethtool_ops = { - .get_settings = e100_get_settings, - .set_settings = e100_set_settings, .get_drvinfo = e100_get_drvinfo, .get_regs_len = e100_get_regs_len, .get_regs = e100_get_regs, @@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = { .get_ethtool_stats = e100_get_ethtool_stats, .get_sset_count = e100_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 879cca47b021..a29b12e80855 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring); int e1000e_setup_tx_resources(struct e1000_ring *ring); void e1000e_free_rx_resources(struct e1000_ring *ring); void e1000e_free_tx_resources(struct e1000_ring *ring); -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats); +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); void e1000e_get_hw_control(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index eccf1da9356b..2175cced402f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ @@ -5920,12 +5920,11 @@ static void e1000_reset_task(struct work_struct *work) * * Returns the address of the device statistics structure. 
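 * Editor's note: the line above is now stale -- after this change the
 * function returns void. Across this merge (hns, ehea, e1000e, fm10k,
 * i40e, igb) the ndo_get_stats64 implementations drop both the return
 * value and any local memset(), because dev_get_stats() in the core
 * already zeroes the rtnl_link_stats64 buffer before calling into the
 * driver. What remains is just filling in the counters, roughly
 * (my_priv being a hypothetical private struct):
 *
 *	static void my_get_stats64(struct net_device *ndev,
 *				   struct rtnl_link_stats64 *stats)
 *	{
 *		struct my_priv *priv = netdev_priv(ndev);
 *
 *		stats->rx_packets = priv->rx_packets;
 *		stats->rx_bytes = priv->rx_bytes;
 *	}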
**/ -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +void e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); - memset(stats, 0, sizeof(struct rtnl_link_stats64)); spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); /* Fill out the OS statistics structure */ @@ -5958,7 +5957,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, /* Tx Dropped needs to be maintained elsewhere */ spin_unlock(&adapter->stats64_lock); - return stats; } /** @@ -6276,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev) /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); } - e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 4d19e46f7c55..52b979443cde 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -260,9 +260,7 @@ struct fm10k_intfc { #define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) #define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) #define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) -#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3)) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4)) -#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5)) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3)) int xcast_mode; /* Tx fast path data */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index dd95ac4f4c64..62a6ad9b3eed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) goto out; /* if we somehow dropped the Tx enable we should reset */ - if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { ret_val = FM10K_ERR_RESET_REQUESTED; goto out; } @@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) /* interface cannot receive traffic without logical ports */ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { - if (hw->mac.ops.request_lport_map) - ret_val = hw->mac.ops.request_lport_map(hw); + if (mac->ops.request_lport_map) + ret_val = mac->ops.request_lport_map(hw); goto out; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 5241e0873397..0c84fef750f4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -148,7 +148,7 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(char **p, const char *prefix, +static void fm10k_add_stat_strings(u8 **p, const char *prefix, const struct fm10k_stats stats[], const unsigned int size) { @@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix, static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); - char *p = (char *)data; unsigned int i; - fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats, 
FM10K_NETDEV_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats, FM10K_GLOBAL_STATS_LEN); - fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats, FM10K_MBX_STATS_LEN); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, + fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats, FM10K_PF_STATS_LEN); for (i = 0; i < interface->hw.mac.max_queues; i++) { char prefix[ETH_GSTRING_LEN]; snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, FM10K_QUEUE_STATS_LEN); snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); - fm10k_add_stat_strings(&p, prefix, + fm10k_add_stat_strings(&data, prefix, fm10k_gstrings_queue_stats, FM10K_QUEUE_STATS_LEN); } @@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - char *p = (char *)data; - switch (stringset) { case ETH_SS_TEST: - memcpy(data, *fm10k_gstrings_test, + memcpy(data, fm10k_gstrings_test, FM10K_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: fm10k_get_stat_strings(dev, data); break; case ETH_SS_PRIV_FLAGS: - memcpy(p, fm10k_prv_flags, + memcpy(data, fm10k_prv_flags, FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN); break; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5de937852436..5bb233a9614c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.21.2-k" +#define DRV_VERSION "0.21.7-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; @@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_buffer: buffer containing page to add + * @size: packet size from rx_desc * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * @@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, * true if the buffer can be reused by the interface. 
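 * Editor's note: the packet length is now read once in
 * fm10k_fetch_rx_buffer() (size = le16_to_cpu(rx_desc->w.length)) and
 * passed down, which lets the dma_sync_single_range_for_cpu() call in
 * the next hunk shrink from the full FM10K_RX_BUFSZ to only the bytes
 * the hardware actually wrote -- a small saving on the hot Rx path.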
**/ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, + unsigned int size, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->w.length); #if (PAGE_SIZE < 8192) unsigned int truesize = FM10K_RX_BUFSZ; #else @@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->w.length); struct fm10k_rx_buffer *rx_buffer; struct page *page; @@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - FM10K_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { + if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ fm10k_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -473,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, fm10k_rx_checksum(rx_ring, rx_desc, skb); + FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; + FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; skb_record_rx_queue(skb, rx_ring->queue_index); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index c9dfa6564fcf..334088a101c3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, * function can also be used to respond to an error as the connection * resetting would also be a means of dealing with errors. **/ -static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, - struct fm10k_mbx_info *mbx) +static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) { + s32 err = 0; const enum fm10k_mbx_state state = mbx->state; switch (state) { @@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_OPEN: /* flush any incomplete work */ fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; break; case FM10K_STATE_CONNECT: /* Update remote value to match local value */ @@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, } fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; } /** @@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { case 0: - fm10k_sm_mbx_process_reset(hw, mbx); + err = fm10k_sm_mbx_process_reset(hw, mbx); break; case FM10K_SM_MBX_VERSION: err = fm10k_sm_mbx_process_version_1(hw, mbx); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index bc5ef6eb3dd6..01db688cf539 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This * function replaces fm10k_get_stats for kernels which support it. 
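 * Editor's note: the fm10k_mbx.c hunk above turns a silent
 * connect-reset into a reported FM10K_ERR_RESET_REQUESTED, and the
 * fm10k_pci.c hunk below catches that return from mbx->ops.process()
 * and latches FM10K_FLAG_RESET_REQUESTED into interface->flags, so a
 * reset requested over the switch-manager mailbox now reaches the
 * driver's reset handling instead of being dropped on the floor.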
*/ -static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void fm10k_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *ring; @@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, /* following stats updated by fm10k_service_task() */ stats->rx_missed_errors = netdev->stats.rx_missed_errors; - - return stats; } int fm10k_setup_tc(struct net_device *dev, u8 tc) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b1a2f8437d59..e372a5823480 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) struct fm10k_hw *hw = &interface->hw; struct fm10k_mbx_info *mbx = &hw->mbx; u32 eicr; + s32 err = 0; /* unmask any set bits related to this interrupt */ eicr = fm10k_read_reg(hw, FM10K_EICR); @@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { - mbx->ops.process(hw, mbx); + err = mbx->ops.process(hw, mbx); /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } + if (err == FM10K_ERR_RESET_REQUESTED) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + /* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 23fb319fd2a0..40ee0242a80a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -72,10 +72,6 @@ force_reset: fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); - /* Reset mailbox global interrupts */ - reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; - fm10k_write_reg(hw, FM10K_GMBX, reg); - /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ba8d30984bee..fdd9069b6cec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -134,19 +134,6 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) -/** - * i40e_is_mac_710 - Return true if MAC is X710/XL710 - * @hw: ptr to the hardware info - **/ -static inline bool i40e_is_mac_710(struct i40e_hw *hw) -{ - if ((hw->mac.type == I40E_MAC_X710) || - (hw->mac.type == I40E_MAC_XL710)) - return true; - - return false; -} - /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -762,6 +749,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f); void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan); int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, @@ -804,7 +792,6 @@ int i40e_lan_add_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf); void 
i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); @@ -834,9 +821,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *storage); +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *storage); int i40e_set_mac(struct net_device *netdev, void *p); void i40e_set_rx_mode(struct net_device *netdev); #endif @@ -853,12 +839,12 @@ int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid); void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid); -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr); -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr); +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid); +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr); +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); #ifdef I40E_FCOE diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 7fe72abc0b4a..7ca048f0b159 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -201,41 +201,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) } /** - * i40e_notify_client_of_netdev_open - call the client open callback - * @vsi: the VSI with netdev opened - * - * If there is a client to this netdev, call the client with open - **/ -void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) -{ - struct i40e_client_instance *cdev; - int ret = 0; - - if (!vsi) - return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.netdev == vsi->netdev) { - if (!cdev->client || - !cdev->client->ops || !cdev->client->ops->open) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance open routine\n"); - continue; - } - if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state))) { - ret = cdev->client->ops->open(&cdev->lan_info, - cdev->client); - if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state); - } - } - } - mutex_unlock(&i40e_client_instance_mutex); -} - -/** * i40e_client_release_qvlist * @ldev: pointer to L2 context. 
* diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index cc1465aac2ef..c4ab3c1ae02a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2072,7 +2072,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector; u16 vector, intrl; - intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; @@ -2116,6 +2116,7 @@ static int __i40e_set_coalesce(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + u16 intrl_reg; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) @@ -2127,8 +2128,9 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { - netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n"); + if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", + INTRL_REG_TO_USEC(I40E_MAX_INTRL)); return -EINVAL; } @@ -2141,7 +2143,12 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - vsi->int_rate_limit = ec->rx_coalesce_usecs_high; + intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); + vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); + if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { + netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n", + vsi->int_rate_limit); + } if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ad4cf639430e..9f785c015a2f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -409,15 +409,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ -#ifdef I40E_FCOE -struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) -#else -static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( - struct net_device *netdev, - struct rtnl_link_stats64 *stats) +#ifndef I40E_FCOE +static #endif +void i40e_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_ring *tx_ring, *rx_ring; @@ -426,10 +422,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( int i; if (test_bit(__I40E_DOWN, &vsi->state)) - return stats; + return; if (!vsi->tx_rings) - return stats; + return; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { @@ -469,8 +465,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; - - return stats; } /** @@ -1440,7 +1434,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). 
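 * Editor's note: this export is part of a wider cleanup in which
 * i40e_put_mac_in_vlan()/i40e_del_mac_all_vlan() become
 * i40e_add_mac_filter()/i40e_del_mac_filter(), with the "is this VSI in
 * VLAN mode?" check folded into the helpers instead of being repeated
 * at every call site. Callers collapse to the pattern i40e_set_mac()
 * uses below:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_del_mac_filter(vsi, netdev->dev_addr);
 *	i40e_add_mac_filter(vsi, addr->sa_data);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);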
**/ -static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) +void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) { if (!f) return; @@ -1483,18 +1477,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) } /** - * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans + * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * - * Goes through all the macvlan filters and adds a macvlan filter for each + * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, + * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. If a PVID has been assigned, instead only * add the macaddr to that VLAN. * * Returns last filter added on success, else NULL **/ -struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, - const u8 *macaddr) +struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, + const u8 *macaddr) { struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; @@ -1504,6 +1499,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); + if (!i40e_is_vsi_in_vlan(vsi)) + return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; @@ -1516,15 +1514,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, } /** - * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS + * i40e_del_mac_filter - Remove a MAC filter from all VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * - * Removes a given MAC address from a VSI, regardless of VLAN + * Removes a given MAC address from a VSI regardless of what VLAN it has been + * associated with. 
* * Returns 0 for success, or error **/ -int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr) +int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; struct hlist_node *h; @@ -1585,8 +1584,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_del_mac_all_vlan(vsi, netdev->dev_addr); - i40e_put_mac_in_vlan(vsi, addr->sa_data); + i40e_del_mac_filter(vsi, netdev->dev_addr); + i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { @@ -1762,14 +1761,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_mac_filter *f; - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, addr); - else - f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY); - - if (f) + if (i40e_add_mac_filter(vsi, addr)) return 0; else return -ENOMEM; @@ -1788,10 +1781,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - if (i40e_is_vsi_in_vlan(vsi)) - i40e_del_mac_all_vlan(vsi, addr); - else - i40e_del_filter(vsi, addr, I40E_VLAN_ANY); + i40e_del_mac_filter(vsi, addr); return 0; } @@ -2574,12 +2564,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be added (0 = untagged only , -1 = any) + * @vid: VLAN id to be added **/ -int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; + if (!vid || vsi->info.pvid) + return -EINVAL; + /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); @@ -2622,10 +2615,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) /** * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be removed (0 = untagged only , -1 = any) + * @vid: VLAN id to be removed **/ -void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) +void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { + if (!vid || vsi->info.pvid) + return; + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_rm_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -3272,7 +3268,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); wr32(hw, I40E_PFINT_RATEN(vector - 1), - INTRL_USEC_TO_REG(vsi->int_rate_limit)); + i40e_intrl_usec_to_reg(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); @@ -8688,7 +8684,7 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { pf->flags |= I40E_FLAG_RESTART_AUTONEG; @@ -8697,13 +8693,13 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* Disable FW LLDP if FW < v4.3 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && 
(pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) pf->flags |= I40E_FLAG_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ - if (i40e_is_mac_710(&pf->hw) && + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; @@ -9345,7 +9341,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ @@ -9354,7 +9350,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) random_ether_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } @@ -9373,7 +9369,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) */ eth_broadcast_addr(broadcast); spin_lock_bh(&vsi->mac_filter_hash_lock); - i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY); + i40e_add_mac_filter(vsi, broadcast); spin_unlock_bh(&vsi->mac_filter_hash_lock); ether_addr_copy(netdev->dev_addr, mac_addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 5b6feb7edeb1..be74bcf9c961 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -55,7 +55,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40e_allocate_dma_mem_d(h, m, s, a) @@ -64,7 +64,7 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 352cf7cd2ef4..f5baeb154d39 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2251,14 +2251,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -2271,6 +2273,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -2335,10 +2338,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; 
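/*
 * Editor's note: TSO byte accounting moves into this function. The Tx
 * buffer is seeded in i40e_xmit_frame_ring() with bytecount = skb->len
 * and gso_segs = 1, and the lines above add (gso_segs - 1) * hdr_len,
 * since every segment after the first repeats the headers on the wire.
 * That is algebraically the old i40e_tx_map() formula,
 * skb->len - hdr_len + gso_segs * hdr_len, but recording first->skb
 * this early also lets the out_drop path free and clear first->skb
 * cleanly.
 */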
cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -2699,7 +2710,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -2708,15 +2718,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2902,8 +2903,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2919,6 +2922,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2926,16 +2935,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2973,7 +2979,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e065321ce8ed..1ea820e9debe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -52,7 +52,20 @@ */ #define INTRL_ENA BIT(6) #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2) -#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0) +/** + * i40e_intrl_usec_to_reg - convert interrupt rate limit to register + * @intrl: interrupt rate limit to convert + * + * This function converts a decimal interrupt rate limit to the appropriate + * register format expected by the firmware when setting interrupt rate limit. 
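 * Editor's note: the register holds the limit in 4-usec units, which is
 * why __i40e_set_coalesce() above now rounds user input down and says
 * so. Worked example: rx-usecs-high = 18 yields
 * (18 >> 2) | INTRL_ENA = 0x44, and INTRL_REG_TO_USEC(0x44) =
 * (0x44 & ~INTRL_ENA) << 2 = 16, so the user sees "Interrupt rate limit
 * rounded down to 16".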
+ */ +static inline u16 i40e_intrl_usec_to_reg(int intrl) +{ + if (intrl >> 2) + return ((intrl >> 2) | INTRL_ENA); + else + return 0; +} #define I40E_INTRL_8K 125 /* 8000 ints/sec */ #define I40E_INTRL_62K 16 /* 62500 ints/sec */ #define I40E_INTRL_83K 12 /* 83333 ints/sec */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index edc0abdf4783..b6cf8d2670a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -125,7 +125,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a6198b727e24..cbbf8648307a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) spin_lock_bh(&vsi->mac_filter_hash_lock); if (is_valid_ether_addr(vf->default_lan_addr.addr)) { - f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? - vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, + vf->default_lan_addr.addr); if (!f) dev_info(&pf->pdev->dev, "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } eth_broadcast_addr(broadcast); - f = i40e_add_filter(vsi, broadcast, - vf->port_vlan_id ? vf->port_vlan_id : -1); + f = i40e_add_mac_filter(vsi, broadcast); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); @@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr); - if (!f) { - if (i40e_is_vsi_in_vlan(vsi)) - f = i40e_put_mac_in_vlan(vsi, al->list[i].addr); - else - f = i40e_add_filter(vsi, al->list[i].addr, -1); - } + if (!f) + f = i40e_add_mac_filter(vsi, al->list[i].addr); if (!f) { dev_err(&pf->pdev->dev, @@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) - if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) { + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* delete the temporary mac address */ if (!is_zero_ether_addr(vf->default_lan_addr.addr)) - i40e_del_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id ? vf->port_vlan_id : -1); + i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); /* Delete all the filters for this VSI - we're going to kill it * anyway. 
*/ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) - i40e_del_filter(vsi, f->macaddr, f->vlan); + __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index aa63b7fb993d..b5a59dd72a0c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: hw->mac.type = I40E_MAC_X722_VF; break; case I40E_DEV_ID_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 21dcaee1ad1d..d76393c95056 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -48,7 +48,6 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index df67ef37b7f3..d4e488267988 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1549,14 +1549,16 @@ out: /** * i40e_tso - set up the tso context descriptor - * @skb: ptr to the skb we're sending + * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) { + struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; @@ -1569,6 +1571,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) unsigned char *hdr; } l4; u32 paylen, l4_offset; + u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1633,10 +1636,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; - cd_mss = skb_shinfo(skb)->gso_size; + cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); @@ -1949,7 +1960,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 gso_segs; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { @@ -1958,15 +1968,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TX_FLAGS_VLAN_SHIFT; } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; - - /* multiply data chunks by size of headers */ - 
first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); - first->gso_segs = gso_segs; - first->skb = skb; first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -2151,8 +2152,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) - goto out_drop; + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2168,6 +2171,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* prepare the xmit flags */ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; @@ -2175,16 +2184,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; @@ -2211,7 +2217,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c85e8a31c072..92ac60da5201 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -100,7 +100,6 @@ enum i40e_debug_mask { */ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, - I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c0fc53361800..3fe87e021148 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0}, /* required last entry */ {0, } }; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a61447fd778e..ee443985581f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + /* Set phy->phy_addr and phy->id. 
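 * Editor's note: this reset pairs with the igb_get_phy_id() change
 * below, which writes 0 to I347AT4_PAGE_SELECT on i210/i211 before the
 * ID read. Both guard against firmware that leaves the PHY's page
 * select register non-zero, in which case the PHY_ID1 read hits the
 * wrong register page and probe can fail on a bogus PHY ID.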
*/ ret_val = igb_get_phy_id_82575(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 8aa798737d4d..07d48f2e3369 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw) ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val) - hw_dbg("Flash update complete\n"); - else hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); out: return ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 5010e2232c50..5eff82678f0b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ - if (hw->mac.type == e1000_i350) { + if (hw->mac.type == e1000_i350) lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG - + lan_offset, 1, &nvm_data); - } else { - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, - 1, &nvm_data); - } + else + lan_offset = 0; + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; @@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 5b54254aed4f..2788a5409023 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_id; + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) goto out; diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index d84afdd83e53..58adbf234e07 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -320,7 +320,7 @@ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ #define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ #define E1000_MANC 0x05820 /* Management Control - RW */ #define E1000_IPAV 0x05838 /* IP Address Valid - RW */ #define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1515abaa5ac9..be456bae8169 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame(struct 
sk_buff *skb, struct net_device *); -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter, bool set); @@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, dev_trans_start(netdev), netdev->last_rx); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); } /* Print Registers */ @@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - return __igb_close(netdev, false); + if (netif_device_present(netdev)) + return __igb_close(netdev, false); + return 0; } /** @@ -3394,7 +3396,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, tdba & 0x00000000ffffffffULL); wr32(E1000_TDBAH(reg_idx), tdba >> 32); - ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); wr32(E1000_TDH(reg_idx), 0); writel(0, ring->tail); @@ -3733,7 +3735,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ring->count * sizeof(union e1000_adv_rx_desc)); /* initialize head and tail */ - ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); wr32(E1000_RDH(reg_idx), 0); writel(0, ring->tail); @@ -5402,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work) * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer **/ -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -5411,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, igb_update_stats(adapter, &adapter->stats64); memcpy(stats, &adapter->stats64, sizeof(*stats)); spin_unlock(&adapter->stats64_lock); - - return stats; } /** @@ -7564,6 +7564,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) @@ -7572,6 +7573,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_ptp_suspend(adapter); igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -7690,16 +7692,15 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - rtnl_lock(); + rtnl_lock(); + if (!err && netif_running(netdev)) err = __igb_open(netdev, true); - rtnl_unlock(); - if (err) - return err; - } - netif_device_attach(netdev); - return 0; + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; } static int igb_runtime_idle(struct device *dev) @@ -7898,6 +7899,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + /* In case of PCI error, the adapter loses its HW address * so we should re-assign it here. 
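+ * (igb_rd32() clears hw->hw_addr to NULL when a register read returns all ones, which is how surprise removal is detected; adapter->io_addr still holds the mapping set up at probe time, so it is the safe value to restore from.)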
+ */ + hw->hw_addr = adapter->io_addr; + igb_reset(adapter); wr32(E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5826b1ddedcf..fbd220d137b3 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (!test_bit(__IXGB_DOWN, &adapter->flags)) ixgb_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ef81c3d8c295..e83444c34cf9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -55,9 +55,6 @@ #include <net/busy_poll.h> -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BP_EXTENDED_STATS -#endif /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -159,6 +156,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; struct vf_macvlans { @@ -200,11 +198,6 @@ struct ixgbe_rx_buffer { struct ixgbe_queue_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif /* BP_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -398,127 +391,10 @@ struct ixgbe_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; -#ifdef CONFIG_NET_RX_BUSY_POLL - atomic_t state; -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ixgbe_qv_state_t { - IXGBE_QV_STATE_IDLE = 0, - IXGBE_QV_STATE_NAPI, - IXGBE_QV_STATE_POLL, - IXGBE_QV_STATE_DISABLE -}; - -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_NAPI); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; -#endif - - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); - - /* flush any outstanding Rx frames */ - if (q_vector->napi.gro_list) - napi_gro_flush(&q_vector->napi, false); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* called from ixgbe_low_latency_poll() */ -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_POLL); -#ifdef BP_EXTENDED_STATS - if (rc != IXGBE_QV_STATE_IDLE) - q_vector->rx.ring->stats.yields++; -#endif - return rc == IXGBE_QV_STATE_IDLE; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - WARN_ON(atomic_read(&q_vector->state) != 
IXGBE_QV_STATE_POLL); - - /* reset state to idle */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; -} - -/* false if QV is currently owned */ -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, - IXGBE_QV_STATE_DISABLE); - - return rc == IXGBE_QV_STATE_IDLE; -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) -{ -} - -static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) -{ - return false; -} - -static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) -{ - return true; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - #ifdef CONFIG_IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 @@ -661,6 +537,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) +#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define IXGBE_FLAG2_EEE_ENABLED BIT(15) /* Tx fast path data */ int num_tx_queues; @@ -862,6 +740,7 @@ enum ixgbe_boards { board_X550, board_X550EM_x, board_x550em_a, + board_x550em_a_fw, }; extern const struct ixgbe_info ixgbe_82598_info; @@ -870,6 +749,7 @@ extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_a_info; +extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 805ab319e578..523f9d05a810 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: phy->ops.reset = &ixgbe_reset_phy_nl; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e00aaeb91827..30535e6b68f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; break; default: break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 8832df3eba25..c38d50c1fcf7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: supported = true; break; default: @@ -348,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); -#ifndef CONFIG_SPARC +#ifndef CONFIG_ARCH_WANT_RELAX_ORDER /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { u32 regval; @@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, else *speed = IXGBE_LINK_SPEED_100_FULL; break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + *speed = IXGBE_LINK_SPEED_10_FULL; + } + break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } @@ -3578,7 +3587,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ -static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; @@ -3593,43 +3602,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) } /** - * ixgbe_host_interface_command - Issue command to manageability block + * ixgbe_hic_unlocked - Issue command to manageability block unlocked * @hw: pointer to the HW structure - * @buffer: contains the command to write and where the return status will - * be placed + * @buffer: command to write and where the return status will be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. * - * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return 0 + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. 
**/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, - u32 length, u32 timeout, - bool return_data) +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, + u32 timeout) { - u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u32 hicr, i, bi, fwsts; - u16 buf_len, dword_len; - union { - struct ixgbe_hic_hdr hdr; - u32 u32arr[1]; - } *bp = buffer; - s32 status; + u32 hicr, i, fwsts; + u16 dword_len; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Take management host interface semaphore */ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); - if (status) - return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3639,15 +3634,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - status = IXGBE_ERR_INVALID_ARGUMENT; - goto rel_out; + return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; @@ -3657,7 +3650,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(bp->u32arr[i])); + i, cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3671,11 +3664,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, /* Check command successful completion. */ if ((timeout && i == timeout) || - !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { - hw_dbg(hw, "Command has failed with no status valid.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto rel_out; + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + + return 0; +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
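+ * Note: this wrapper only acquires the IXGBE_GSSR_SW_MNG_SM semaphore and then delegates the register I/O to ixgbe_hic_unlocked(); a caller that must issue several commands under one semaphore hold can take the semaphore itself and use ixgbe_hic_unlocked() directly.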
+ **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, + u32 length, u32 timeout, + bool return_data) +{ + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; + u16 buf_len, dword_len; + s32 status; + u32 bi; + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, buffer, length, timeout); + if (status) + goto rel_out; if (!return_data) goto rel_out; @@ -3722,6 +3758,8 @@ rel_out: * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success return 0 @@ -3729,7 +3767,8 @@ rel_out: * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 sub) + u8 build, u8 sub, __always_unused u16 len, + __always_unused const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 5b3e3c65927e..e083732adf64 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -111,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver); + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index fd192bf29b26..ee28e54a4f75 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -197,15 +197,17 @@ static int ixgbe_get_settings(struct net_device *netdev, SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? 
- SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; + ecmd->supported |= SUPPORTED_100baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10_FULL) + ecmd->supported |= SUPPORTED_10baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ ecmd->advertising = ecmd->supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { ecmd->advertising = 0; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) @@ -237,6 +239,7 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_x550em_ext_t: + case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; @@ -346,6 +349,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_100_FULL: ethtool_cmd_speed_set(ecmd, SPEED_100); break; + case IXGBE_LINK_SPEED_10_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10); + break; default: break; } @@ -394,6 +400,9 @@ static int ixgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_100baseT_Full) advertised |= IXGBE_LINK_SPEED_100_FULL; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + advertised |= IXGBE_LINK_SPEED_10_FULL; + if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ @@ -1170,12 +1179,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1185,12 +1188,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; @@ -1198,12 +1195,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = 0; - data[i+1] = 0; - data[i+2] = 0; - i += 3; -#endif continue; } @@ -1213,12 +1204,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i+1] = ring->stats.misses; - data[i+2] = ring->stats.cleaned; - i += 3; -#endif } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1255,28 +1240,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, 
"rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); @@ -3173,6 +3142,9 @@ static int ixgbe_get_module_info(struct net_device *dev, u8 sff8472_rev, addr_mode; bool page_swap = false; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, @@ -3218,6 +3190,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, if (ee->len == 0) return -EINVAL; + if (hw->phy.type == ixgbe_phy_fw) + return -ENXIO; + for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) @@ -3237,6 +3212,136 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, return 0; } +static const struct { + ixgbe_link_speed mac_speed; + u32 supported; +} ixgbe_ls_map[] = { + { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, + { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, + { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, + { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, + { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, +}; + +static const struct { + u32 lp_advertised; + u32 mac_speed; +} ixgbe_lp_map[] = { + { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, + { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, + { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, + { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, + { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, + { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, +}; + +static int +ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + s32 rc; + u16 i; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); + if (rc) + return rc; + + edata->lp_advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { + if (info[0] & ixgbe_lp_map[i].lp_advertised) + edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; + } + + edata->supported = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) + edata->supported |= ixgbe_ls_map[i].supported; + } + + edata->advertised = 0; + for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { + if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) + edata->advertised |= ixgbe_ls_map[i].supported; + } + + edata->eee_enabled = !!edata->advertised; + edata->tx_lpi_enabled = edata->eee_enabled; + if (edata->advertised & edata->lp_advertised) + edata->eee_active = true; + + return 0; +} + +static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) + return ixgbe_get_eee_fw(adapter, edata); + + return -EOPNOTSUPP; +} + +static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ixgbe_get_eee(netdev, &eee_data); + if 
(ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err(drv, "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_err(drv, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_err(drv, + "Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) { + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = + hw->phy.eee_speeds_supported; + } else { + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + hw->phy.eee_speeds_advertised = 0; + } + + /* reset link */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -3269,6 +3374,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, + .get_eee = ixgbe_get_eee, + .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 15ab337fd7ad..1b8be7d813bd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +#define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; @@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = IXGBE_RSS_16Q_MASK; + + if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -847,11 +853,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); -#ifdef CONFIG_NET_RX_BUSY_POLL - /* initialize busy poll */ - atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); - -#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1e2f39ebd824..86135c00d4b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, [board_x550em_a] = &ixgbe_x550em_a_info, + [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, 
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, /* required last entry */ {0, } }; @@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) @@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", + "trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, - dev_trans_start(netdev), - netdev->last_rx); + dev_trans_start(netdev)); } /* Print Registers */ @@ -1717,11 +1720,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - skb_mark_napi_id(skb, &q_vector->napi); - if (ixgbe_qv_busy_polling(q_vector)) - netif_receive_skb(skb); - else - napi_gro_receive(&q_vector->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -2198,40 +2197,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, return total_rx_packets; } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbe_low_latency_recv(struct napi_struct *napi) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int found = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbe_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbe_for_each_ring(ring, q_vector->rx) { - found = ixgbe_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbe_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2447,6 +2412,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; + s32 rc; if (test_bit(__IXGBE_DOWN, &adapter->state)) return; @@ -2485,6 +2451,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) return; break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + rc = hw->phy.ops.check_overtemp(hw); + if (rc != IXGBE_ERR_OVERTEMP) + return; + break; default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; @@ -2531,6 +2503,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } return; + case ixgbe_mac_x550em_a: + if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + IXGBE_EICR_GPI_SDP0_X550EM_a); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X550EM_a); + } + return; + case ixgbe_mac_X550: case ixgbe_mac_X540: if (!(eicr & 
IXGBE_EICR_TS)) return; @@ -2856,8 +2840,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - /* Exit if we are called by netpoll or busy polling is active */ - if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) + /* Exit if we are called by netpoll */ + if (budget <= 0) return budget; /* attempt to distribute budget to each queue fairly, but don't allow @@ -2876,7 +2860,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget) clean_complete = false; } - ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -4559,23 +4542,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { - ixgbe_qv_init_lock(adapter->q_vector[q_idx]); + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_enable(&adapter->q_vector[q_idx]->napi); - } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } - } } static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) @@ -5294,6 +5270,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); + if (adapter->hw.phy.type == ixgbe_phy_fw) + ixgbe_watchdog_link_is_down(adapter); ixgbe_down(adapter); /* * If SR-IOV enabled then wait a bit before bringing the adapter @@ -5554,6 +5532,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** + * ixgbe_set_eee_capable - helper function to determine EEE support on X550 + * @adapter: board private structure + */ +static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!hw->phy.eee_speeds_supported) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; + if (!hw->phy.eee_speeds_advertised) + break; + adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; + break; + default: + adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; + break; + } +} + +/** * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ @@ -5717,6 +5720,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, break; case ixgbe_mac_x550em_a: adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + default: + break; + } /* fall through */ case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB @@ -5730,6 +5741,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, #endif /* IXGBE_FCOE */ /* Fall Through */ case ixgbe_mac_X550: + if (hw->mac.type == ixgbe_mac_X550) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif @@ -6200,7 +6213,8 @@ int ixgbe_close(struct net_device *netdev) ixgbe_ptp_stop(adapter); - ixgbe_close_suspend(adapter); + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); @@ -6245,14 +6259,12 @@ static int 
ixgbe_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - rtnl_unlock(); - - if (err) - return err; - netif_device_attach(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); - return 0; + return err; } #endif /* CONFIG_PM */ @@ -6267,14 +6279,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_unlock(); ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -6808,6 +6820,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case IXGBE_LINK_SPEED_100_FULL: speed_str = "100 Mbps"; break; + case IXGBE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; default: speed_str = "unknown speed"; break; @@ -8111,8 +8126,9 @@ static void ixgbe_netpoll(struct net_device *netdev) } #endif -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + +static void ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; @@ -8150,13 +8166,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, } } rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; stats->rx_errors = netdev->stats.rx_errors; stats->rx_length_errors = netdev->stats.rx_length_errors; stats->rx_crc_errors = netdev->stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - return stats; } #ifdef CONFIG_IXGBE_DCB @@ -9290,9 +9306,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbe_low_latency_recv, -#endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, @@ -9596,6 +9609,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; + ixgbe_set_eee_capable(adapter); if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { @@ -9833,8 +9847,9 @@ skip_sriov: * since os does not support feature */ if (hw->mac.ops.set_fw_drv_ver) - hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, - 0xFF); + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, + sizeof(ixgbe_driver_version) - 1, + ixgbe_driver_version); /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); @@ -10082,7 +10097,7 @@ skip_bad_vf_detection: } if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_close_suspend(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -10152,10 +10167,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } #endif + rtnl_lock(); if (netif_running(netdev)) - ixgbe_up(adapter); + ixgbe_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 01c2667c0f92..811cb4f64a5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -74,6 +74,7 @@ enum ixgbe_pfvf_api_rev { 
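+ /* each API revision is negotiated per VF in ixgbe_negotiate_vf_api(); a new entry here must also be added to every vf_api switch in ixgbe_sriov.c, or VFs on that version lose the gated mailbox ops to -EOPNOTSUPP */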
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 3b8362085f57..2fcde8777a29 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 10; + int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; @@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) */ for (i = 0; i < 30; i++) { msleep(100); - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl); - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); - break; + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + MDIO_MMD_PMAPMD, &ctrl); + if (status) + return status; + + if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + udelay(2); + break; + } + } else { + status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (status) + return status; + + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } } } @@ -767,6 +784,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + if (speed & IXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; + /* Setup link based on the new speed settings */ hw->phy.ops.setup_link(hw); @@ -960,40 +980,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) } /** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ @@ -1738,6 +1724,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index ecf05f838fc5..5aa2c3cf7aec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 
ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 1efb404431e9..ef0635e0918c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7e5d9850e4b2..044cb44747cf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -934,7 +935,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, IXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); @@ -978,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1002,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return -1; @@ -1041,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: 
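+ /* api_10/api_11 VFs predate the RETA query, so reject them here */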
return -EOPNOTSUPP; + } /* This mailbox command is supported (required) only for 82599 and x540 * VFs which support up to 4 RSS queues. Therefore we will compress the * RETA by saving only 2 bits from each entry. This way we will be able * to transfer the whole RETA in a single mailbox operation. */ @@ -1068,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EPERM; /* verify the PF is supporting the correct API */ - if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + break; + default: return -EOPNOTSUPP; + } memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); @@ -1081,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, { struct ixgbe_hw *hw = &adapter->hw; int xcast_mode = msgbuf[1]; - u32 vmolr, disable, enable; + u32 vmolr, fctrl, disable, enable; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: + /* promisc introduced in API 1.3 */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall through */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -1101,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = 0; break; case IXGBEVF_XCAST_MODE_MULTI: - disable = IXGBE_VMOLR_MPE; + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; break; case IXGBEVF_XCAST_MODE_ALLMULTI: - disable = 0; + disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; break; + case IXGBEVF_XCAST_MODE_PROMISC: + if (hw->mac.type <= ixgbe_mac_82599EB) + return -EOPNOTSUPP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + e_warn(drv, + "Enabling VF promisc requires PF in promisc\n"); + return -EPERM; + } + + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index cf21273db201..1d07f2ead914 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -92,6 +92,8 @@ #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED @@ -1499,6 +1501,8 @@ enum { #define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) /* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ #define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ #define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ #define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ @@ -1914,6 +1918,7 @@ enum { #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 #define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0 #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ @@ -2619,6 
+2624,7 @@ enum ixgbe_fdir_pballoc_type { #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ #define FW_READ_SHADOW_RAM_CMD 0x31 #define FW_READ_SHADOW_RAM_LEN 0x6 #define FW_WRITE_SHADOW_RAM_CMD 0x33 @@ -2644,6 +2650,59 @@ enum ixgbe_fdir_pballoc_type { #define FW_INT_PHY_REQ_LEN 10 #define FW_INT_PHY_REQ_READ 0 #define FW_INT_PHY_REQ_WRITE 1 +#define FW_PHY_ACT_REQ_CMD 5 +#define FW_PHY_ACT_DATA_COUNT 4 +#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) +#define FW_PHY_ACT_INIT_PHY 1 +#define FW_PHY_ACT_SETUP_LINK 2 +#define FW_PHY_ACT_LINK_SPEED_10 BIT(0) +#define FW_PHY_ACT_LINK_SPEED_100 BIT(1) +#define FW_PHY_ACT_LINK_SPEED_1G BIT(2) +#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3) +#define FW_PHY_ACT_LINK_SPEED_5G BIT(4) +#define FW_PHY_ACT_LINK_SPEED_10G BIT(5) +#define FW_PHY_ACT_LINK_SPEED_20G BIT(6) +#define FW_PHY_ACT_LINK_SPEED_25G BIT(7) +#define FW_PHY_ACT_LINK_SPEED_40G BIT(8) +#define FW_PHY_ACT_LINK_SPEED_50G BIT(9) +#define FW_PHY_ACT_LINK_SPEED_100G BIT(10) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \ + HW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u +#define FW_PHY_ACT_SETUP_LINK_LP BIT(18) +#define FW_PHY_ACT_SETUP_LINK_HP BIT(19) +#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20) +#define FW_PHY_ACT_SETUP_LINK_AN BIT(22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0) +#define FW_PHY_ACT_GET_LINK_INFO 3 +#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29) +#define FW_PHY_ACT_FORCE_LINK_DOWN 4 +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0) +#define FW_PHY_ACT_PHY_SW_RESET 5 +#define FW_PHY_ACT_PHY_HW_RESET 6 +#define FW_PHY_ACT_GET_PHY_INFO 7 +#define FW_PHY_ACT_UD_2 0x1002 +#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4) +#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3) +#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1) +#define FW_PHY_ACT_RETRIES 50 +#define FW_PHY_INFO_SPEED_MASK 0xFFFu +#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u +#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2686,6 +2745,16 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ }; +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + /* These need to be dword aligned */ struct ixgbe_hic_read_shadow_ram { union ixgbe_hic_hdr2 hdr; @@ -2734,6 +2803,19 @@ struct ixgbe_hic_internal_phy_resp { __be32 read_data; }; +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2849,6 +2931,7 @@ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 @@ -3064,6 +3147,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, ixgbe_phy_sgmii, + ixgbe_phy_fw, ixgbe_phy_generic }; @@ -3362,7 +3446,8 @@ struct ixgbe_mac_operations { void (*fc_autoneg)(struct ixgbe_hw *); /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); @@ -3392,7 +3477,6 @@ struct ixgbe_phy_operations { s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); @@ -3478,6 +3562,8 @@ struct ixgbe_phy_info { bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e2ff823ee202..84a467a8ed3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ixgbe_link_speed speed; bool link_up; - /* - * Link should be up in order for the blink bit in the LED control + if (index > 3) + return IXGBE_ERR_PARAM; + + /* Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. */ @@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) u32 macc_reg; u32 ledctl_reg; + if (index > 3) + return IXGBE_ERR_PARAM; + /* Restore the LED to its default value. 
*/ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -913,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = { .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, .set_phy_power = &ixgbe_set_copper_phy_power, - .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, }; static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 11fb433eb924..200f847fd8f3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) return 0; } +static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware structure **/ @@ -402,6 +414,204 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); } +/** + * ixgbe_fw_phy_activity - Perform an activity on a PHY + * @hw: pointer to hardware structure + * @activity: activity to perform + * @data: Pointer to 4 32-bit words of data + */ +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]) +{ + union { + struct ixgbe_hic_phy_activity_req cmd; + struct ixgbe_hic_phy_activity_resp rsp; + } hic; + u16 retries = FW_PHY_ACT_RETRIES; + s32 rc; + u32 i; + + do { + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; + hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.activity_id = cpu_to_le16(activity); + for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) + hic.cmd.data[i] = cpu_to_be32((*data)[i]); + + rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (rc) + return rc; + if (hic.rsp.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + (*data)[i] = be32_to_cpu(hic.rsp.data[i]); + return 0; + } + usleep_range(20, 30); + --retries; + } while (retries > 0); + + return IXGBE_ERR_HOST_INTERFACE_COMMAND; +} + +static const struct { + u16 fw_speed; + ixgbe_link_speed phy_speed; +} ixgbe_fw_map[] = { + { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, + { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, + { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, +}; + +/** + * ixgbe_get_phy_id_fw - Get the phy ID via firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + u16 phy_speeds; + u16 phy_id_lo; + s32 rc; + u16 i; + + if (hw->phy.id) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); + if (rc) + return rc; + + hw->phy.speeds_supported = 0; + phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (phy_speeds & 
ixgbe_fw_map[i].fw_speed) + hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; + } + + hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; + if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + return IXGBE_ERR_PHY_ADDR_INVALID; + + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + return 0; +} + +/** + * ixgbe_identify_phy_fw - Get PHY type based on firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +{ + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + return ixgbe_get_phy_id_fw(hw); +} + +/** + * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + + setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; + return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_rx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_tx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + default: + break; + } + + for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { + if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) + setup[0] |= ixgbe_fw_map[i].fw_speed; + } + setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; + + if (hw->phy.eee_speeds_advertised) + setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); + if (rc) + return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) + return IXGBE_ERR_OVERTEMP; + return 0; +} + +/** + * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
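+ * A requested_mode of ixgbe_fc_default is promoted to ixgbe_fc_full before the link setup is delegated to ixgbe_setup_fw_link().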
+ */ +static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +{ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + return ixgbe_setup_fw_link(hw); +} + /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * @@ -624,41 +834,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, return status; } -/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface - * command assuming that the semaphore is already obtained. - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the hostif. - **/ -static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, - u16 *data) -{ - s32 status; - struct ixgbe_hic_read_shadow_ram buffer; - - buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; - buffer.hdr.req.buf_lenh = 0; - buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; - buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; - - /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); - /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); - - status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, false); - if (status) - return status; - - *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, - FW_NVM_DATA_OFFSET); - - return 0; -} - /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read @@ -670,6 +845,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; @@ -677,7 +853,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u32 i; /* Take semaphore for the entire operation. 
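 * Both the management (SW_MNG) and EEPROM semaphores are now held for the whole buffer read, so each chunk below can be issued with ixgbe_hic_unlocked() instead of re-acquiring the lock per host interface command.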
*/ - status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) { hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); return status; @@ -698,10 +874,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, &buffer, - sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, - false); + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); if (status) { hw_dbg(hw, "Host interface command failed\n"); goto out; @@ -725,7 +899,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, } out: - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, mask); return status; } @@ -896,15 +1070,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) **/ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status = 0; + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); } + hw->mac.ops.release_swfw_sync(hw, mask); return status; } @@ -1768,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, return rc; } +/** + * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + 
IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + */ +static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) + goto out; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) + goto out; + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (!status) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1780,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) mac->ops.setup_fc = NULL; mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; break; + case ixgbe_media_type_copper: + if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && + hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + break; + } + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = ixgbe_check_mac_link_generic; + break; case ixgbe_media_type_backplane: mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; @@ -1827,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; - return; + break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) @@ -1870,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; + } + /* SFP */ if 
(hw->phy.media_type == ixgbe_media_type_fiber) { /* CS4227 SFP must not enable auto-negotiation */ @@ -2108,8 +2435,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); @@ -2189,12 +2514,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /** * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure - * - * Configures the integrated KR PHY for X550EM_x. **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - if (hw->mac.type != ixgbe_mac_X550EM_x) + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return 0; return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); @@ -2356,6 +2680,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) return 0; } +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends the driver version number to firmware through the manageability + * block. Returns 0 on success, IXGBE_ERR_SWFW_SYNC when encountering an + * error acquiring the semaphore, or IXGBE_ERR_HOST_INTERFACE_COMMAND + * when the command fails. + **/ +static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val; + int i; + + if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status != + FW_CEM_RESP_STATUS_SUCCESS) + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return 0; + } + + return ret_val; +} + /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed @@ -2655,6 +3035,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) } /** + * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); + if (rc) + return rc; + memset(store, 0, sizeof(store)); + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); + if (rc) + return rc; + + return ixgbe_setup_fw_link(hw); +} + +/** + *
ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp + * @hw: pointer to hardware structure + */ +static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +{ + u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); + if (rc) + return rc; + + if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { + ixgbe_shutdown_fw_phy(hw); + return IXGBE_ERR_OVERTEMP; + } + return 0; +} + +/** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure * @@ -2740,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; + break; default: break; } @@ -2777,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: media_type = ixgbe_media_type_copper; break; default: @@ -2844,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) hlreg0 &= ~IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + /* Select fast MDIO clock speed for these devices */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; default: break; } @@ -3275,7 +3712,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .clear_vfta = &ixgbe_clear_vfta_generic, \ .set_vfta = &ixgbe_set_vfta_generic, \ .fc_enable = &ixgbe_fc_enable_generic, \ - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ @@ -3355,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; +static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { + X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, + .init_led_link_act = ixgbe_init_led_link_act_generic, + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .fc_autoneg = ixgbe_fc_autoneg, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, +}; + #define X550_COMMON_EEP \ .read = &ixgbe_read_ee_hostif_X550, \ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ @@ -3384,12 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = NULL, \ - .check_overtemp = &ixgbe_tn_check_overtemp, \ - 
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + .set_phy_power = NULL, static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = NULL, .identify = &ixgbe_identify_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3398,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = { static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_generic, @@ -3406,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY + .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_x550a, @@ -3414,6 +3873,17 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = { .write_reg_mdi = &ixgbe_write_phy_reg_mdi, }; +static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { + X550_COMMON_PHY + .check_overtemp = ixgbe_check_overtemp_fw, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_fw, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + static const struct ixgbe_link_operations link_ops_x550em_x = { .read_link = &ixgbe_read_i2c_combined_generic, .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, @@ -3463,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = { .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, }; + +const struct ixgbe_info ixgbe_x550em_a_fw_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = ixgbe_get_invariants_X550_a_fw, + .mac_ops = &mac_ops_x550em_a_fw, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 508e72c5f1c2..1f6c0ecd50bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } /* populate Rx queue data */ @@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, if (!ring) { data[i++] = 0; data[i++] = 0; -#ifdef BP_EXTENDED_STATS - data[i++] = 0; - data[i++] = 0; - data[i++] = 0; -#endif continue; } @@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; -#ifdef BP_EXTENDED_STATS - data[i] = ring->stats.yields; - data[i + 1] = ring->stats.misses; - data[i + 2] = ring->stats.cleaned; - i += 3; -#endif } } @@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", 
i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "tx_queue_%u_bp_napi_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef BP_EXTENDED_STATS - sprintf(p, "rx_queue_%u_bp_poll_yield", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_misses", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bp_cleaned", i); - p += ETH_GSTRING_LEN; -#endif /* BP_EXTENDED_STATS */ } break; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 5639fbe294d0..a8cbc2dda0dd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -37,11 +37,6 @@ #include "vf.h" -#ifdef CONFIG_NET_RX_BUSY_POLL -#include <net/busy_poll.h> -#define BP_EXTENDED_STATS -#endif - #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer { struct ixgbevf_stats { u64 packets; u64 bytes; -#ifdef BP_EXTENDED_STATS - u64 yields; - u64 misses; - u64 cleaned; -#endif }; struct ixgbevf_tx_queue_stats { @@ -217,109 +207,6 @@ struct ixgbevf_q_vector { #endif /* CONFIG_NET_RX_BUSY_POLL */ }; -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) -{ - spin_lock_init(&q_vector->lock); - q_vector->state = IXGBEVF_QV_STATE_IDLE; -} - -/* called from the device poll routine to get ownership of a q_vector */ -static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_LOCKED) { - WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); - q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->tx.ring->stats.yields++; -#endif - } else { - /* we don't care if someone yielded */ - q_vector->state = IXGBEVF_QV_STATE_NAPI; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true is someone tried to get the qv while napi had it */ -static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | - IXGBEVF_QV_STATE_NAPI_YIELD)); - - if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* called from ixgbevf_low_latency_poll() */ -static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if ((q_vector->state & IXGBEVF_QV_LOCKED)) { - q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; - rc = false; -#ifdef BP_EXTENDED_STATS - q_vector->rx.ring->stats.yields++; -#endif - } else { - /* preserve yield marks */ - q_vector->state |= IXGBEVF_QV_STATE_POLL; - } - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* returns true if someone tried to get the qv while it was locked */ -static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) -{ - int rc = false; - - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); - - if 
(q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) - rc = true; - /* reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -/* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) -{ - WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); - return q_vector->state & IXGBEVF_QV_USER_PEND; -} - -/* false if QV is currently owned */ -static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) -{ - int rc = true; - - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBEVF_QV_OWNED) - rc = false; - q_vector->state |= IXGBEVF_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; -} - -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ @@ -464,6 +351,7 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6d4bef5803f2..80bab261a0ec 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb) { -#ifdef CONFIG_NET_RX_BUSY_POLL - skb_mark_napi_id(skb, &q_vector->napi); - - if (ixgbevf_qv_busy_polling(q_vector)) { - netif_receive_skb(skb); - /* exit early if we busy polled */ - return; - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ - napi_gro_receive(&q_vector->napi, skb); } @@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (budget <= 0) return budget; -#ifdef CONFIG_NET_RX_BUSY_POLL - if (!ixgbevf_qv_lock_napi(q_vector)) - return budget; -#endif /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling @@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) clean_complete = false; } -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_unlock_napi(q_vector); -#endif - /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int ixgbevf_busy_poll_recv(struct napi_struct *napi) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *ring; - int found = 0; - - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return LL_FLUSH_FAILED; - - if (!ixgbevf_qv_lock_poll(q_vector)) - return LL_FLUSH_BUSY; - - ixgbevf_for_each_ring(ring, q_vector->rx) { - found = ixgbevf_clean_rx_irq(q_vector, ring, 4); -#ifdef BP_EXTENDED_STATS - if (found) - ring->stats.cleaned += found; - else - ring->stats.misses++; -#endif - if (found) - break; - } - - ixgbevf_qv_unlock_poll(q_vector); - - return found; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ 
-1930,6 +1878,16 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; + /* request the most inclusive mode we need */ + if (flags & IFF_PROMISC) + xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; + else if (flags & IFF_ALLMULTI) + xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; + else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = IXGBEVF_XCAST_MODE_MULTI; + else + xcast_mode = IXGBEVF_XCAST_MODE_NONE; + spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.update_xcast_mode(hw, xcast_mode); @@ -1950,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; -#ifdef CONFIG_NET_RX_BUSY_POLL - ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); -#endif napi_enable(&q_vector->napi); } } @@ -1966,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); -#ifdef CONFIG_NET_RX_BUSY_POLL - while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { - pr_info("QV %d locked\n", q_idx); - usleep_range(1000, 20000); - } -#endif /* CONFIG_NET_RX_BUSY_POLL */ } } @@ -2071,7 +2020,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_12, + int api[] = { ixgbe_mbox_api_13, + ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; @@ -2373,6 +2323,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; default: @@ -3228,6 +3179,21 @@ err_setup_reset: } /** + * ixgbevf_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) +{ + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); +} + +/** * ixgbevf_close - Disables a network interface * @netdev: network interface device structure * @@ -3242,11 +3208,8 @@ int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); + if (netif_device_present(netdev)) + ixgbevf_close_suspend(adapter); return 0; } @@ -3268,6 +3231,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. 
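 * The close/open cycle below is wrapped in rtnl_lock()/rtnl_unlock() so the queue reset cannot race with other rtnl-protected reconfiguration paths.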
*/ + rtnl_lock(); + if (netif_running(dev)) ixgbevf_close(dev); @@ -3276,6 +3241,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) if (netif_running(dev)) ixgbevf_open(dev); + + rtnl_unlock(); } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, @@ -3796,17 +3763,14 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - if (netif_running(netdev)) { - rtnl_lock(); - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); - ixgbevf_clear_interrupt_scheme(adapter); - rtnl_unlock(); - } + if (netif_running(netdev)) + ixgbevf_close_suspend(adapter); + + ixgbevf_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -3838,6 +3802,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } + + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -3869,8 +3835,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev) ixgbevf_suspend(pdev, PMSG_SUSPEND); } -static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void ixgbevf_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; @@ -3903,8 +3869,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, stats->tx_bytes += bytes; stats->tx_packets += packets; } - - return stats; } #define IXGBEVF_MAX_MAC_HDR_LEN 127 @@ -3953,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ixgbevf_busy_poll_recv, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif @@ -4102,6 +4063,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4244,7 +4206,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, } if (netif_running(netdev)) - ixgbevf_down(adapter); + ixgbevf_close_suspend(adapter); if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -4272,6 +4234,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } + adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); @@ -4292,12 +4255,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); if (netif_running(netdev)) - ixgbevf_up(adapter); + ixgbevf_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } /* PCI Error Recovery (ERS) */ diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 340cdd469455..bc0442acae78 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d46ba1dabcb7..8a5db9d7219d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * Thus return an error if API doesn't support RETA querying or querying * is not supported for this device type. */ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RETA; @@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * Thus return an error if API doesn't support RSS Random Key retrieval * or if the operation is not supported for this device type. */ - if (hw->api_version != ixgbe_mbox_api_12 || - hw->mac.type >= ixgbe_mac_X550_vf) + switch (hw->api_version) { + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_12: + if (hw->mac.type >= ixgbe_mac_X550_vf) + break; + default: return -EOPNOTSUPP; + } msgbuf[0] = IXGBE_VF_GET_RSS_KEY; err = hw->mbx.ops.write_posted(hw, msgbuf, 1); @@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) switch (hw->api_version) { case ixgbe_mbox_api_12: + /* promisc was introduced in API version 1.3 */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall through */ + case ixgbe_mbox_api_13: break; default: return -EOPNOTSUPP; @@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: break; default: return 0; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f9fcab54783c..f580b49e6b67 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev) jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); @@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev) jme->phylink = 0; jme_reset_phy_processor(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); /* * Force to Reset the link again @@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev, } static int -jme_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); return rc; } static int
-jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct jme_adapter *jme = netdev_priv(netdev); int rc, fdc = 0; - if (ethtool_cmd_speed(ecmd) == SPEED_1000 - && ecmd->autoneg != AUTONEG_ENABLE) + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; /* @@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev, * Hardware would not generate link change interrupt. */ if (jme->mii_if.force_media && - ecmd->autoneg != AUTONEG_ENABLE && - (jme->mii_if.full_duplex != ecmd->duplex)) + cmd->base.autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != cmd->base.duplex)) fdc = 1; spin_lock_bh(&jme->phy_lock); - rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); spin_unlock_bh(&jme->phy_lock); if (!rc) { if (fdc) jme_reset_link(jme); - jme->old_ecmd = *ecmd; + jme->old_cmd = *cmd; set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) if (!rc && (cmd == SIOCSMIIREG)) { if (duplex_chg) jme_reset_link(jme); - jme_get_settings(netdev, &jme->old_ecmd); + jme_get_link_ksettings(netdev, &jme->old_cmd); set_bit(JME_FLAG_SSET, &jme->flags); } @@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = { .set_pauseparam = jme_set_pauseparam, .get_wol = jme_get_wol, .set_wol = jme_set_wol, - .get_settings = jme_get_settings, - .set_settings = jme_set_settings, .get_link = jme_get_link, .get_msglevel = jme_get_msglevel, .set_msglevel = jme_set_msglevel, @@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = { .get_eeprom_len = jme_get_eeprom_len, .get_eeprom = jme_get_eeprom, .set_eeprom = jme_set_eeprom, + .get_link_ksettings = jme_get_link_ksettings, + .set_link_ksettings = jme_set_link_ksettings, }; static int @@ -3306,7 +3306,7 @@ jme_resume(struct device *dev) jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) - jme_set_settings(netdev, &jme->old_ecmd); + jme_set_link_ksettings(netdev, &jme->old_cmd); else jme_reset_phy_processor(jme); jme_phy_calibration(jme); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 58cd67c0c8e4..89535c019f04 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -447,7 +447,7 @@ struct jme_adapter { u8 chip_sub_rev; u8 pcirev; u32 msg_enable; - struct ethtool_cmd old_ecmd; + struct ethtool_link_ksettings old_cmd; unsigned int old_mtu; struct dynpcc_info dpi; atomic_t intr_sem; @@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev) /* * Function prototypes */ -static int jme_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd); +static int jme_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); static void jme_set_unicastaddr(struct net_device *netdev); static void jme_set_multi(struct net_device *netdev); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 8037426ec50f..9fae98caf83a 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget) work_done = korina_rx(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); writel(readl(&lp->rx_dma_regs->dmasm) & ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), @@ -695,25 +695,27 @@ 
static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_gset(&lp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct korina_private *lp = netdev_priv(dev); int rc; spin_lock_irq(&lp->lock); - rc = mii_ethtool_sset(&lp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); spin_unlock_irq(&lp->lock); korina_set_carrier(&lp->mii_if); @@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index faea52da8dae..afc810069440 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget) { struct ltq_etop_chan *ch = container_of(napi, struct ltq_etop_chan, napi); - int rx = 0; - int complete = 0; + int work_done = 0; - while ((rx < budget) && !complete) { + while (work_done < budget) { struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - ltq_etop_hw_receive(ch); - rx++; - } else { - complete = 1; - } + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + ltq_etop_hw_receive(ch); + work_done++; } - if (complete || !rx) { - napi_complete(&ch->napi); + if (work_done < budget) { + napi_complete_done(&ch->napi, work_done); ltq_dma_ack_irq(&ch->dma); } - return rx; + return work_done; } static int diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1fa7c03edec2..20cb7f0de601 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2319,7 +2319,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); - napi_complete(napi); + napi_complete_done(napi, work_done); wrlp(mp, INT_MASK, mp->int_mask); } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e05e22705cf7..de6c47744b8e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -224,6 +224,7 @@ #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_DEC_SENT_MASK 0xff #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 @@ -525,6 +526,7 @@ struct mvneta_tx_queue { * descriptor ring */ int count; + int pending; int 
tx_stop_threshold; int tx_wake_threshold; @@ -652,7 +654,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp) } /* Get System Network Statistics */ -static struct rtnl_link_stats64 * +static void mvneta_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -686,8 +688,6 @@ mvneta_get_stats64(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } /* Rx descriptors helper methods */ @@ -820,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, /* Only 255 descriptors can be added at once; assume the caller * processes TX descriptors in quanta less than 256 */ - val = pend_desc; + val = pend_desc + txq->pending; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + txq->pending = 0; } /* Get pointer to next TX descriptor to be processed (sent) by HW */ @@ -1758,8 +1759,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, /* Free tx queue skbuffs */ static void mvneta_txq_bufs_free(struct mvneta_port *pp, - struct mvneta_tx_queue *txq, int num) + struct mvneta_tx_queue *txq, int num, + struct netdev_queue *nq) { + unsigned int bytes_compl = 0, pkts_compl = 0; int i; for (i = 0; i < num; i++) { @@ -1767,6 +1770,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, txq->txq_get_index; struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; + if (skb) { + bytes_compl += skb->len; + pkts_compl++; + } + mvneta_txq_inc_get(txq); if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) @@ -1777,6 +1785,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, continue; dev_kfree_skb_any(skb); } + + netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } /* Handle end of transmission */ @@ -1790,7 +1800,7 @@ static void mvneta_txq_done(struct mvneta_port *pp, if (!tx_done) return; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); txq->count -= tx_done; @@ -2400,12 +2410,18 @@ out: struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - txq->count += frags; - mvneta_txq_pend_desc_add(pp, txq, frags); + netdev_tx_sent_queue(nq, len); + txq->count += frags; if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); + if (!skb->xmit_more || netif_xmit_stopped(nq) || + txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) + mvneta_txq_pend_desc_add(pp, txq, frags); + else + txq->pending += frags; + u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; @@ -2424,9 +2440,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done = txq->count; - mvneta_txq_bufs_free(pp, txq, tx_done); + mvneta_txq_bufs_free(pp, txq, tx_done, nq); /* reset txq */ txq->count = 0; @@ -2750,11 +2767,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget) rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } - budget -= rx_done; - - if (budget > 0) { + if (rx_done < budget) { cause_rx_tx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); if (pp->neta_armada3700) { unsigned long flags; @@ -2952,6 +2967,8 @@ static int mvneta_txq_init(struct mvneta_port *pp, static void mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + kfree(txq->tx_skb); if (txq->tso_hdrs) @@ -2963,6 +2980,8 @@ static void
mvneta_txq_deinit(struct mvneta_port *pp, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); + netdev_tx_reset_queue(nq); + txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4fe430ceb194..c2fd7c36f927 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (budget > 0) { cause_rx = 0; - napi_complete(napi); + napi_complete_done(napi, rx_done); mvpp2_interrupts_enable(port); } @@ -5739,7 +5739,7 @@ error: return err; } -static struct rtnl_link_stats64 * +static void mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); @@ -5771,8 +5771,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3af2814ada23..3376a19f1e19 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1261,7 +1261,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) } work_done = rxq_process(dev, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); wrl(pep, INT_MASK, ALL_INTS); } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9146a514fb33..edb95271a4f2 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw) return supported; } -static int skge_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int skge_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = skge_supported_modes(hw); + supported = skge_supported_modes(hw); if (hw->copper) { - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; } else - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + + advertising = skge->advertising; + cmd->base.autoneg = skge->autoneg; + cmd->base.speed = skge->speed; + cmd->base.duplex = skge->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); - ecmd->advertising = skge->advertising; - ecmd->autoneg = skge->autoneg; - ethtool_cmd_speed_set(ecmd, skge->speed); - ecmd->duplex = skge->duplex; return 0; } -static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int skge_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); int err = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); - if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising = supported; + 
if (cmd->base.autoneg == AUTONEG_ENABLE) { + advertising = supported; skge->duplex = -1; skge->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; skge->speed = speed; - skge->duplex = ecmd->duplex; + skge->duplex = cmd->base.duplex; } - skge->autoneg = ecmd->autoneg; - skge->advertising = ecmd->advertising; + skge->autoneg = cmd->base.autoneg; + skge->advertising = advertising; if (netif_running(dev)) { skge_down(dev); @@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom } static const struct ethtool_ops skge_ethtool_ops = { - .get_settings = skge_get_settings, - .set_settings = skge_set_settings, .get_drvinfo = skge_get_drvinfo, .get_regs_len = skge_get_regs_len, .get_regs = skge_get_regs, @@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_phys_id = skge_set_phys_id, .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_link_ksettings = skge_get_link_ksettings, + .set_link_ksettings = skge_set_link_ksettings, }; /* @@ -3190,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev) } } -static int skge_poll(struct napi_struct *napi, int to_do) +static int skge_poll(struct napi_struct *napi, int budget) { struct skge_port *skge = container_of(napi, struct skge_port, napi); struct net_device *dev = skge->netdev; @@ -3203,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); - for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; u32 control; @@ -3225,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do) wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); - if (work_done < to_do) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - napi_gro_flush(napi, false); spin_lock_irqsave(&hw->hw_lock, flags); - __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index b60ad0e56a9f..2b2cc3f3ca10 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, sky2->rx_stats.bytes += bytes; 
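/* The skge_poll() hunk above drops the open-coded napi_gro_flush() +
 * __napi_complete() sequence in favour of napi_complete_done(), which
 * flushes GRO itself, feeds work_done into interrupt moderation, and
 * returns false when NAPI must stay scheduled. Sketch of the resulting
 * poll skeleton; foo_clean_rx() and foo_unmask_irqs() are hypothetical:
 */
#include <linux/netdevice.h>

static int foo_clean_rx(struct napi_struct *napi, int budget);	/* assumed */
static void foo_unmask_irqs(struct napi_struct *napi);		/* assumed */

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);

	/* re-arm device interrupts only if NAPI really completed */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_unmask_irqs(napi);

	return work_done;
}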
u64_stats_update_end(&sky2->rx_stats.syncp); - dev->last_rx = jiffies; + sky2->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), rxqaddr[port]); } @@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev) u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ - if (sky2->check.last == dev->last_rx && + if (sky2->check.last == sky2->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX hang */ @@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev) fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { - sky2->check.last = dev->last_rx; + sky2->check.last = sky2->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; @@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | SUPPORTED_1000baseT_Full; } -static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = sky2_supported_modes(hw); - ecmd->phy_address = PHY_ADDR_MARV; + supported = sky2_supported_modes(hw); + cmd->base.phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, sky2->speed); - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + cmd->base.port = PORT_TP; + cmd->base.speed = sky2->speed; + supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { - ethtool_cmd_speed_set(ecmd, SPEED_1000); - ecmd->port = PORT_FIBRE; - ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } - ecmd->advertising = sky2->advertising; - ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + advertising = sky2->advertising; + cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; - ecmd->duplex = sky2->duplex; + cmd->base.duplex = sky2->duplex; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int sky2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); + u32 new_advertising; - if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ~supported) + ethtool_convert_link_mode_to_legacy_u32(&new_advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (new_advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else - sky2->advertising = ecmd->advertising | + sky2->advertising = new_advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; @@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->speed = -1; } else { u32 setting; - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) + else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; @@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return -EINVAL; sky2->speed = speed; - sky2->duplex = ecmd->duplex; + sky2->duplex = cmd->base.duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } @@ -3888,8 +3900,8 @@ static void sky2_set_multicast(struct net_device *dev) gma_write16(hw, port, GM_RX_CTRL, reg); } -static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -3929,8 +3941,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->tx_fifo_errors = dev->stats.tx_fifo_errors; - - return stats; } /* Can have one global because blinking is controlled by @@ -4407,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) } static const struct ethtool_ops sky2_ethtool_ops = { - .get_settings = sky2_get_settings, - .set_settings = sky2_set_settings, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, @@ -4431,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .set_phys_id = 
sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, + .get_link_ksettings = sky2_get_link_ksettings, + .set_link_ksettings = sky2_set_link_ksettings, }; #ifdef CONFIG_SKY2_DEBUG diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ec6dcd80152b..0fe160796842 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2247,6 +2247,7 @@ struct sky2_port { u16 rx_data_size; u16 rx_nfrags; + unsigned long last_rx; struct { unsigned long last; u32 mac_rp; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1c29c86f8709..9e757684816d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_eth *eth) } } -static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) +static void mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_hw_stats *hw_stats = mac->hw_stats; @@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, storage->tx_errors = dev->stats.tx_errors; storage->rx_dropped = dev->stats.rx_dropped; storage->tx_dropped = dev->stats.tx_dropped; - - return storage; } static inline int mtk_max_frag_size(int mtu) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 9aa422691954..c4d714fcc7da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; + u8 cur_autoneg; int ret; u32 ptys_adv = ethtool2ptys_link_modes( @@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return 0; } - proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? - cpu_to_be32(ptys_adv) : - speed_set_ptys_admin(priv, speed, - ptys_reg.eth_proto_cap); + cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ? 
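/* Several hunks in this region (mvpp2, sky2, mtk, and mlx4_en below) drop
 * the struct rtnl_link_stats64 * return value from ndo_get_stats64: the
 * core now always supplies the buffer and ignores any return, so the hook
 * became void. Minimal sketch for a hypothetical driver:
 */
#include <linux/netdevice.h>

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	/* fill the caller-provided buffer in place; nothing to return */
	netdev_stats_to_stats64(stats, &dev->stats);
}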
+ AUTONEG_DISABLE : AUTONEG_ENABLE; + + if (link_ksettings->base.autoneg == AUTONEG_DISABLE) { + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + if ((be32_to_cpu(proto_admin) & + (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) | + MLX4_PROT_MASK(MLX4_1000BASE_KX))) && + (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP)) + ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN; + } else { + proto_admin = cpu_to_be32(ptys_adv); + ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN; + } proto_admin &= ptys_reg.eth_proto_cap; if (!proto_admin) { @@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, return -EINVAL; /* nothing to change due to bad input */ } - if (proto_admin == ptys_reg.eth_proto_admin) + if ((proto_admin == ptys_reg.eth_proto_admin) && + ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) && + (link_ksettings->base.autoneg == cur_autoneg))) return 0; /* Nothing to change */ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", @@ -1788,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); - if (dev->num_tc) + if (netdev_get_num_tc(dev)) mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]); @@ -1980,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: - return -ENOSYS; + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3b4961a8e8e4..748e9f65c386 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1321,7 +1321,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } -static struct rtnl_link_stats64 * +static void mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1330,8 +1330,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx4_en_fold_software_stats(dev); netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); - - return stats; } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) @@ -1697,6 +1695,14 @@ int mlx4_en_start_port(struct net_device *dev) priv->port, err); goto tx_err; } + + err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); + if (err) { + en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", + dev->mtu, priv->port, err); + goto tx_err; + } + /* Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h index 040da4b16b1c..930f961fee42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -35,7 +35,6 @@ #define _MLX4_EN_PORT_H_ -#define SET_PORT_GEN_ALL_VALID 0x7 #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac527e25ec9..f15ddba3659a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -33,6 +33,7 @@ #include <net/busy_poll.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> 
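/* The en_rx.c hunk just below adds <linux/bpf_trace.h> and fires the new
 * trace_xdp_exception tracepoint on XDP_ABORTED and on XDP_TX transmit
 * failure, making otherwise silent drops observable from tracing. Generic
 * shape of such a verdict switch, condensed into a helper (a sketch, not
 * the mlx4 code; the drop policy is left to the caller):
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/bpf_trace.h>

static u32 foo_run_xdp(struct net_device *dev, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		/* the program sees nothing; this tracepoint is the signal */
		trace_xdp_exception(dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		act = XDP_DROP;
		break;
	}
	return act;
}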
#include <linux/mlx4/qp.h> @@ -706,7 +707,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, do { if (mlx4_en_prepare_rx_desc(priv, ring, ring->prod & ring->size_mask, - GFP_ATOMIC | __GFP_COLD)) + GFP_ATOMIC | __GFP_COLD | + __GFP_MEMALLOC)) break; ring->prod++; } while (--missing); @@ -925,10 +927,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud length, cq->ring, &doorbell_pending))) goto consumed; + trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 5886ad78058f..3ed42199d3f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; u8 up = 0; - if (dev->num_tc) + if (netdev_get_num_tc(dev)) return skb_tx_hash(dev, skb); if (skb_vlan_tag_present(skb)) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 84bab9f0732e..3fe885ce1902 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); func_cap->physical_port = field; if (func_cap->physical_port != gen_or_port) { - err = -ENOSYS; + err = -EINVAL; goto out; } @@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = - (ilog2(cache_line_size()) - 4) << 5; + ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); #if defined(__LITTLE_ENDIAN) *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); @@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + context->flags2 |= SET_PORT_GEN_PHV_VALID; if (phv_bit) context->phv_en |= SET_PORT_GEN_PHV_EN; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bffa6f345f2f..15ef787e71ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) */ if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); - return -ENOSYS; + return -EINVAL; } mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; @@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); - return -ENOSYS; + return -EINVAL; } dev->caps.num_ports = func_cap.num_ports; @@ -3492,7 +3492,7 @@ slave_start: mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { - err = -ENOSYS; + err = -EOPNOTSUPP; mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 
086920b615af..b4f1bc56cc68 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -487,6 +487,7 @@ struct mlx4_slave_state { bool vst_qinq_supported; u8 function; dma_addr_t vhcr_dma; + u16 user_mtu[MLX4_MAX_PORTS + 1]; u16 mtu[MLX4_MAX_PORTS + 1]; __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; @@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx { struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; + u16 max_user_mtu[MLX4_MAX_PORTS + 1]; u8 pptx; u8 pprx; int disable_mcast_ref[MLX4_MAX_PORTS + 1]; @@ -774,7 +776,9 @@ struct mlx4_vlan_table { int max; }; -#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \ + MLX4_FLAG_V_PPRX_MASK | \ + MLX4_FLAG_V_PPTX_MASK) #define SET_PORT_PROMISC_SHIFT 31 #define SET_PORT_MC_PROMISC_SHIFT 30 @@ -787,7 +791,7 @@ enum { struct mlx4_set_port_general_context { u16 reserved1; - u8 v_ignore_fcs; + u8 flags2; u8 flags; union { u8 ignore_fcs; @@ -803,7 +807,8 @@ struct mlx4_set_port_general_context { u16 reserved4; u32 reserved5; u8 phv_en; - u8 reserved6[3]; + u8 reserved6[5]; + __be16 user_mtu; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index b656dd5772e5..5053c949148f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -50,7 +50,11 @@ #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL -#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1) +#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5) +#define MLX4_FLAG_V_MTU_MASK BIT(0) +#define MLX4_FLAG_V_PPRX_MASK BIT(1) +#define MLX4_FLAG_V_PPTX_MASK BIT(2) #define MLX4_IGNORE_FCS_MASK 0x1 #define MLX4_TC_MAX_NUMBER 8 @@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) return; } +static void +mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 mtu, prev_mtu; + + /* Mtu is configured as the max USER_MTU among all + * the functions on the port. + */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) { + int i; + + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_mtu[port] = + max_t(u16, master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); +} + +static void +mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + u16 user_mtu, prev_user_mtu; + + /* User Mtu is configured as the max USER_MTU among all + * the functions on the port. 
+ */ + user_mtu = be16_to_cpu(gen_context->user_mtu); + user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]); + prev_user_mtu = slave_st->user_mtu[port]; + slave_st->user_mtu[port] = user_mtu; + if (user_mtu > master->max_user_mtu[port]) + master->max_user_mtu[port] = user_mtu; + if (user_mtu < prev_user_mtu && + prev_user_mtu == master->max_user_mtu[port]) { + int i; + + slave_st->user_mtu[port] = user_mtu; + master->max_user_mtu[port] = user_mtu; + for (i = 0; i < dev->num_slaves; i++) + master->max_user_mtu[port] = + max_t(u16, master->max_user_mtu[port], + master->slave_state[i].user_mtu[port]); + } + gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]); +} + +static void +mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave, + struct mlx4_set_port_general_context *gen_context) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + + /* Slave cannot change Global Pause configuration */ + if (slave != mlx4_master_func_num(dev) && + (gen_context->pptx != master->pptx || + gen_context->pprx != master->pprx)) { + gen_context->pptx = master->pptx; + gen_context->pprx = master->pprx; + mlx4_warn(dev, "denying Global Pause change for slave:%d\n", + slave); + } else { + master->pptx = gen_context->pptx; + master->pprx = gen_context->pprx; + } +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_port_info *port_info; - struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; - struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; @@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, int base; u32 in_modifier; u32 promisc; - u16 mtu, prev_mtu; int err; int i, j; int offset; @@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, is_eth = op_mod; port_info = &priv->port[port]; - /* Slaves cannot perform SET_PORT operations except changing MTU */ + /* Slaves cannot perform SET_PORT operations, + * except for changing MTU and USER_MTU. + */ if (is_eth) { if (slave != dev->caps.function && in_modifier != MLX4_SET_PORT_GENERAL && @@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, break; case MLX4_SET_PORT_GENERAL: gen_context = inbox->buf; - /* Mtu is configured as the max MTU among all the - * the functions on the port. 
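/* mlx4_en_set_port_mtu()/mlx4_en_set_port_user_mtu() above keep the port
 * value at the maximum requested by any function, so lowering the value
 * that currently is the maximum forces a rescan of every function. The
 * bookkeeping reduced to plain C (names and types are illustrative):
 */
#include <linux/types.h>

static u16 port_max_after_update(u16 *fn_mtu, int nfuncs, int fn,
				 u16 new_mtu, u16 cur_max)
{
	u16 prev = fn_mtu[fn];
	int i;

	fn_mtu[fn] = new_mtu;
	if (new_mtu > cur_max)
		return new_mtu;		/* grew: trivially the new max */
	if (new_mtu < prev && prev == cur_max) {
		/* the old maximum shrank: recompute over all functions */
		cur_max = new_mtu;
		for (i = 0; i < nfuncs; i++)
			if (fn_mtu[i] > cur_max)
				cur_max = fn_mtu[i];
	}
	return cur_max;
}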
*/ - mtu = be16_to_cpu(gen_context->mtu); - mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + - ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); - prev_mtu = slave_st->mtu[port]; - slave_st->mtu[port] = mtu; - if (mtu > master->max_mtu[port]) - master->max_mtu[port] = mtu; - if (mtu < prev_mtu && prev_mtu == - master->max_mtu[port]) { - slave_st->mtu[port] = mtu; - master->max_mtu[port] = mtu; - for (i = 0; i < dev->num_slaves; i++) { - master->max_mtu[port] = - max(master->max_mtu[port], - master->slave_state[i].mtu[port]); - } - } - gen_context->mtu = cpu_to_be16(master->max_mtu[port]); - /* Slave cannot change Global Pause configuration */ - if (slave != mlx4_master_func_num(dev) && - ((gen_context->pptx != master->pptx) || - (gen_context->pprx != master->pprx))) { - gen_context->pptx = master->pptx; - gen_context->pprx = master->pprx; - mlx4_warn(dev, - "denying Global Pause change for slave:%d\n", - slave); - } else { - master->pptx = gen_context->pptx; - master->pprx = gen_context->pprx; - } + if (gen_context->flags & MLX4_FLAG_V_MTU_MASK) + mlx4_en_set_port_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK) + mlx4_en_set_port_user_mtu(dev, slave, port, + gen_context); + + if (gen_context->flags & + (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK)) + mlx4_en_set_port_global_pause(dev, slave, + gen_context); + break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids @@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, } EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK; + context->user_mtu = cpu_to_be16(user_mtu); + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu); + int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) { struct mlx4_cmd_mailbox *mailbox; @@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK; if (ignore_fcs_value) context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; else diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1822382212ee..6fe9f76ae656 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -77,6 +77,7 @@ struct res_common { int from_state; int to_state; int removing; + const char *func_name; }; enum { @@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id) struct rb_node *node = root->rb_node; while (node) { - struct res_common *res = container_of(node, struct res_common, - node); + struct res_common *res = rb_entry(node, struct res_common, + node); if (res_id < res->res_id) node = node->rb_left; @@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root,
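/* The resource_tracker hunks here swap container_of() for rb_entry().
 * rb_entry() is defined as container_of(), so behaviour is identical;
 * the alias just states the intent. Typical rbtree lookup using it
 * (struct thing and its id field are illustrative):
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct thing {
	struct rb_node node;
	u64 id;
};

static struct thing *thing_lookup(struct rb_root *root, u64 id)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct thing *t = rb_entry(n, struct thing, node);

		if (id < t->id)
			n = n->rb_left;
		else if (id > t->id)
			n = n->rb_right;
		else
			return t;
	}
	return NULL;
}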
struct res_common *res) /* Figure out where to put new node */ while (*new) { - struct res_common *this = container_of(*new, struct res_common, - node); + struct res_common *this = rb_entry(*new, struct res_common, + node); parent = *new; if (res->res_id < this->res_id) @@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } +static const char *mlx4_resource_type_to_str(enum mlx4_resource t) +{ + switch (t) { + case RES_QP: + return "QP"; + case RES_CQ: + return "CQ"; + case RES_SRQ: + return "SRQ"; + case RES_XRCD: + return "XRCD"; + case RES_MPT: + return "MPT"; + case RES_MTT: + return "MTT"; + case RES_MAC: + return "MAC"; + case RES_VLAN: + return "VLAN"; + case RES_COUNTER: + return "COUNTER"; + case RES_FS_RULE: + return "FS_RULE"; + case RES_EQ: + return "EQ"; + default: + return "INVALID RESOURCE"; + } +} + static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { @@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id, res_id); } -static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, - enum mlx4_resource type, - void *res) +static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res, const char *func_name) { struct res_common *r; int err = 0; @@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, } if (r->state == RES_ANY_BUSY) { + mlx4_warn(dev, + "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n", + func_name, slave, res_id, mlx4_resource_type_to_str(type), + r->func_name); err = -EBUSY; goto exit; } @@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, r->from_state = r->state; r->state = RES_ANY_BUSY; + r->func_name = func_name; if (res) *((struct res_common **)res) = r; @@ -881,6 +917,9 @@ exit: return err; } +#define get_res(dev, slave, res_id, type, res) \ + _get_res((dev), (slave), (res_id), (type), (res), __func__) + int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, u64 res_id, int *slave) @@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); - if (r) + if (r) { r->state = r->from_state; + r->func_name = ""; + } spin_unlock_irq(mlx4_tlock(dev)); } @@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) case RES_MTT: return remove_mtt_ok((struct res_mtt *)res, extra); case RES_MAC: - return -ENOSYS; + return -EOPNOTSUPP; case RES_EQ: return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 32d4af9b594d..336d4738b807 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); + cq->uar = dev->priv.uar; + return 0; err_cmd: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a9dbc28f6b97..a62f4b6a21a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -71,6 +71,16 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, 
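/* The _get_res()/get_res() change above is a debuggability aid: the macro
 * captures the caller via __func__ and stores it in the busy resource, so
 * a later -EBUSY can name the current holder. The pattern in isolation
 * (struct lock_ish and take() are illustrative, not mlx4 names):
 */
#include <linux/errno.h>
#include <linux/printk.h>

struct lock_ish {
	const char *holder;	/* __func__ of the current owner, or NULL */
};

static int __take(struct lock_ish *l, const char *func_name)
{
	if (l->holder) {
		pr_warn("%s: busy, held by %s\n", func_name, l->holder);
		return -EBUSY;
	}
	l->holder = func_name;
	return 0;
}
#define take(l)	__take((l), __func__)	/* callers never pass it by hand */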
&priv->ctx_list); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (dev_ctx->intf->pfault) { + if (priv->pfault) { + mlx5_core_err(dev, "multiple page fault handlers not supported"); + } else { + priv->pfault_ctx = dev_ctx->context; + priv->pfault = dev_ctx->intf->pfault; + } + } +#endif spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); @@ -97,6 +107,15 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (!dev_ctx) return; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + spin_lock_irq(&priv->ctx_lock); + if (priv->pfault == dev_ctx->intf->pfault) + priv->pfault = NULL; + spin_unlock_irq(&priv->ctx_lock); + + synchronize_srcu(&priv->pfault_srcu); +#endif + spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); @@ -329,6 +348,20 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_unlock_irqrestore(&priv->ctx_lock, flags); } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault) +{ + struct mlx5_priv *priv = &dev->priv; + int srcu_idx; + + srcu_idx = srcu_read_lock(&priv->pfault_srcu); + if (priv->pfault) + priv->pfault(dev, priv->pfault_ctx, pfault); + srcu_read_unlock(&priv->pfault_srcu, srcu_idx); +} +#endif + void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d5ecb8f53fd4..9b52c58cd528 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -51,6 +51,9 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) +#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -98,6 +101,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 @@ -259,6 +263,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; + u8 *pps_pin_caps; }; enum { @@ -369,6 +374,7 @@ struct mlx5e_rq { unsigned long state; int ix; + u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ struct bpf_prog *xdp_prog; @@ -465,7 +471,6 @@ struct mlx5e_sq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; - void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; u16 bf_buf_size; @@ -479,7 +484,7 @@ struct mlx5e_sq { /* control path */ struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_uar uar; + struct mlx5_sq_bfreg bfreg; struct mlx5e_channel *channel; int tc; u32 rate_limit; @@ -568,8 +573,9 @@ struct mlx5e_vlan_table { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; - struct mlx5_flow_handle *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *any_cvlan_rule; + struct mlx5_flow_handle *any_svlan_rule; + bool filter_disabled; }; struct mlx5e_l2_table { @@ -777,9 +783,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, struct skb_shared_hwtstamps *hwts); void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct 
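/* The mlx5 dev.c hunks above publish a single page-fault handler under
 * SRCU: dispatch happens inside srcu_read_lock(), and the unregister path
 * clears the pointer and then waits in synchronize_srcu() so no call is
 * still running when the handler goes away. Condensed sketch (the handler
 * slot and event type are illustrative):
 */
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(evt_srcu);
static void (*evt_handler)(int event);

static void evt_dispatch(int event)
{
	int idx = srcu_read_lock(&evt_srcu);
	void (*h)(int) = READ_ONCE(evt_handler);

	if (h)
		h(event);
	srcu_read_unlock(&evt_srcu, idx);
}

static void evt_unregister(void)
{
	WRITE_ONCE(evt_handler, NULL);
	synchronize_srcu(&evt_srcu);	/* wait out in-flight dispatches */
}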
mlx5e_priv *priv); +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); @@ -807,7 +815,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { - u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; + u16 ofst = sq->bf_offset; /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); @@ -819,9 +827,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); if (bf_sz) - __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); + __iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz); else - mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); + mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL); /* flush the write-combining mapped buffer */ wmb(); @@ -833,7 +841,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) struct mlx5_core_cq *mcq; mcq = &cq->mcq; - mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); } static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) @@ -841,12 +849,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } -static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) -{ - return min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 746a92c13644..37e66eef6fb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -37,6 +37,22 @@ enum { MLX5E_CYCLES_SHIFT = 23 }; +enum { + MLX5E_PIN_MODE_IN = 0x0, + MLX5E_PIN_MODE_OUT = 0x1, +}; + +enum { + MLX5E_OUT_PATTERN_PULSE = 0x0, + MLX5E_OUT_PATTERN_PERIODIC = 0x1, +}; + +enum { + MLX5E_EVENT_MODE_DISABLE = 0x0, + MLX5E_EVENT_MODE_REPETETIVE = 0x1, + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + mutex_lock(&priv->state_lock); /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); + mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ netdev_warn(dev, "Disabling cqe compression"); - mlx5e_modify_rx_cqe_compression(priv, false); + mlx5e_modify_rx_cqe_compression_locked(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: + 
mutex_unlock(&priv->state_lock); return -ERANGE; } memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; @@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + + if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + /* For future use need to add a loop for finding all 1PPS out pins */ + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); + + mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + } if (delta < 0) { neg_adj = 1; @@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) return 0; } +static int mlx5e_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u8 pattern = 0; + int pin = -1; + int err = 0; + + if (!MLX5_CAP_GEN(priv->mdev, pps) || + !MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->extts.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + } + + if (rq->extts.flags & PTP_FALLING_EDGE) + pattern = 1; + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u64 nsec_now, nsec_delta, time_stamp; + u64 cycles_now, cycles_delta; + struct timespec64 ts; + unsigned long flags; + int pin = -1; + s64 ns; + + if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + return -EOPNOTSUPP; + + if (rq->perout.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + if (on) + if ((ns >> 1) != 500000000LL) + return -EINVAL; + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); + MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + 
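/* mlx5e_perout_configure() above has to hand the hardware a start time in
 * raw counter cycles: it converts "now" to nanoseconds through the
 * timecounter, takes the delta to the requested start, and scales that
 * back with the cyclecounter's mult/shift. The arithmetic in isolation
 * (a sketch; the caller supplies the current raw counter value):
 */
#include <linux/timecounter.h>
#include <linux/math64.h>

static u64 wallclock_to_cycles(struct timecounter *tc,
			       const struct cyclecounter *cc,
			       u64 cycles_now, u64 target_ns)
{
	u64 nsec_now = timecounter_cyc2time(tc, cycles_now);
	u64 nsec_delta = target_ns - nsec_now;

	/* invert ns = (cycles * mult) >> shift for the delta */
	return cycles_now + div64_u64(nsec_delta << cc->shift, cc->mult);
}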
MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + + return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); +} + +static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mlx5e_extts_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return mlx5e_perout_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; +} + static const struct ptp_clock_info mlx5e_ptp_clock_info = { .owner = THIS_MODULE, .max_adj = 100000000, @@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = { .gettime64 = mlx5e_ptp_gettime, .settime64 = mlx5e_ptp_settime, .enable = NULL, + .verify = NULL, }; static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) @@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } +static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +{ + int i; + + tstamp->ptp_info.pin_config = + kzalloc(sizeof(*tstamp->ptp_info.pin_config) * + tstamp->ptp_info.n_pins, GFP_KERNEL); + if (!tstamp->ptp_info.pin_config) + return -ENOMEM; + tstamp->ptp_info.enable = mlx5e_ptp_enable; + tstamp->ptp_info.verify = mlx5e_ptp_verify; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + snprintf(tstamp->ptp_info.pin_config[i].name, + sizeof(tstamp->ptp_info.pin_config[i].name), + "mlx5_pps%d", i); + tstamp->ptp_info.pin_config[i].index = i; + tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; + tstamp->ptp_info.pin_config[i].chan = i; + } + + return 0; +} + +static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, + struct mlx5e_tstamp *tstamp) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + mlx5_query_mtpps(priv->mdev, out, sizeof(out)); + + tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); +} + +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + ptp_clock_event(tstamp->ptp, event); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { struct mlx5e_tstamp *tstamp = &priv->tstamp; @@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) tstamp->ptp_info = mlx5e_ptp_clock_info; snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + /* Initialize 1PPS data structures */ +#define MAX_PIN_NUM 8 + tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); + if (tstamp->pps_pin_caps) { + if (MLX5_CAP_GEN(priv->mdev, pps)) + mlx5e_get_pps_caps(priv, tstamp); + if 
(tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); + } else { + mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); + } + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); if (IS_ERR(tstamp->ptp)) { @@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } + kfree(tstamp->pps_pin_caps); + kfree(tstamp->ptp_info.pin_config); + cancel_delayed_work_sync(&tstamp->overflow_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index f175518ff07a..bd898d8deda0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) struct mlx5e_resources *res = &mdev->mlx5e_res; int err; - err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false); - if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - return err; - } - err = mlx5_core_alloc_pd(mdev, &res->pdn); if (err) { mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + return err; } err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); @@ -121,9 +115,6 @@ err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, res->pdn); -err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &res->cq_uar); - return err; } @@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); - mlx5_unmap_free_uar(mdev, &res->cq_uar); } int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bb67863aa361..cc80522b5854 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + - NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); + + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + 
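/* mlx5e_init_pin_config() above follows the generic recipe for exposing
 * programmable PTP pins: allocate one ptp_pin_desc per pin, name it,
 * default it to PTP_PF_NONE, and install .verify/.enable callbacks on the
 * ptp_clock_info. Reduced sketch for a hypothetical clock:
 */
#include <linux/ptp_clock_kernel.h>
#include <linux/slab.h>

static int foo_init_pins(struct ptp_clock_info *info, unsigned int n_pins)
{
	unsigned int i;

	info->pin_config = kcalloc(n_pins, sizeof(*info->pin_config),
				   GFP_KERNEL);
	if (!info->pin_config)
		return -ENOMEM;
	info->n_pins = n_pins;

	for (i = 0; i < n_pins; i++) {
		snprintf(info->pin_config[i].name,
			 sizeof(info->pin_config[i].name), "foo_pps%u", i);
		info->pin_config[i].index = i;
		info->pin_config[i].func = PTP_PF_NONE;	/* unassigned */
		info->pin_config[i].chan = i;
	}
	return 0;
}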
pcie_perf_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], @@ -535,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - ch->max_combined = mlx5e_get_max_num_channels(priv->mdev); + ch->max_combined = priv->profile->max_nch(priv->mdev); ch->combined_count = priv->params.num_channels; } @@ -1459,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - int err = 0; - bool reset; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1470,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - - if (reset) - mlx5e_close_locked(netdev); - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable); + mlx5e_modify_rx_cqe_compression_locked(priv, enable); priv->params.rx_cqe_compress_def = enable; - if (reset) - err = mlx5e_open_locked(netdev); - return err; + return 0; } static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index a0e5a69402b3..f2762e45c8ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, - MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, MLX5E_VLAN_RULE_TYPE_MATCH_VID, }; @@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, dest.ft = priv->fs.l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: rule_p = &priv->fs.vlan.untagged_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); break; - case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + rule_p = &priv->fs.vlan.any_cvlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + rule_p = &priv->fs.vlan.any_svlan_rule; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.cvlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, @@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.untagged_rule = NULL; } break; - case 
MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->fs.vlan.any_vlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule); - priv->fs.vlan.any_vlan_rule = NULL; + case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: + if (priv->fs.vlan.any_cvlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); + priv->fs.vlan.any_cvlan_rule = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: + if (priv->fs.vlan.any_svlan_rule) { + mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); + priv->fs.vlan.any_svlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: @@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } } +static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) +{ + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + +static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + if (err) + return err; + + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); +} + void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { if (!priv->fs.vlan.filter_disabled) @@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) @@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_add_any_vid_rules(priv); } static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) @@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) if (priv->fs.vlan.filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + mlx5e_del_any_vid_rules(priv); } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ @@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (enable_promisc) { mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.filter_disabled) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); @@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { if (!priv->fs.vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); + mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } @@ -976,11 +1010,13 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 2 +#define MLX5E_NUM_VLAN_GROUPS 3 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(1) +#define MLX5E_VLAN_GROUP2_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ - MLX5E_VLAN_GROUP1_SIZE) + MLX5E_VLAN_GROUP1_SIZE +\ + MLX5E_VLAN_GROUP2_SIZE) static int 
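/* The en_fs.c hunks here split the old catch-all "any VID" rule into a
 * C-TAG and an S-TAG rule, since firmware now matches cvlan_tag and
 * svlan_tag separately, and the two are always added or removed as a
 * pair. A sketch of that pairing with the rollback spelled out locally
 * (add_rule/del_rule and the enum are stand-ins for the mlx5 steering
 * calls, and the in-place rollback is this sketch's choice):
 */
enum foo_vid_rule { ANY_CTAG_VID, ANY_STAG_VID };
struct foo_fs;

int add_rule(struct foo_fs *fs, enum foo_vid_rule r);	/* stand-in */
void del_rule(struct foo_fs *fs, enum foo_vid_rule r);	/* stand-in */

static int add_any_vid_rules(struct foo_fs *fs)
{
	int err = add_rule(fs, ANY_CTAG_VID);

	if (err)
		return err;
	err = add_rule(fs, ANY_STAG_VID);
	if (err)
		del_rule(fs, ANY_CTAG_VID);	/* keep the pair atomic */
	return err;
}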
__mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP0_SIZE; @@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index f33f72d0237c..d55fff0ba388 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, - vlan_tag, 1); + cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, first_vid, 0xfff); MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f14ca3385fdd..ab6f4d3b8063 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -31,6 +31,7 @@ */ #include <net/tc_act/tc_gact.h> +#include <linux/crash_dump.h> #include <net/pkt_cls.h> #include <linux/mlx5/fs.h> #include <net/vxlan.h> @@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.rq_wq_type = rq_type; switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.log_rq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.mpwqe_log_stride_sz = MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : @@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.log_rq_size = is_kdump_kernel() ? 
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; } priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); @@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; @@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) &qcnt->rx_out_of_buffer); } +static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) + return; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { - mlx5e_update_q_counter(priv); - mlx5e_update_vport_counters(priv); + mlx5e_update_pcie_counters(priv); mlx5e_update_pport_counters(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_q_counter(priv); mlx5e_update_sw_counters(priv); } @@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; + struct ptp_clock_event ptp_event; + struct mlx5_eqe *eqe = NULL; if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return; @@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; - + case MLX5_DEV_EVENT_PPS: + eqe = (struct mlx5_eqe *)param; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_event.index = eqe->data.pps.pin; + ptp_event.timestamp = + timecounter_cyc2time(&priv->tstamp.clock, + be64_to_cpu(eqe->data.pps.time_stamp)); + mlx5e_pps_event_handler(vpriv, &ptp_event); + break; default: break; } @@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } -#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) -#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) - static inline int mlx5e_get_wqe_mtt_sz(void) { /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. 
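The new MLX5_DEV_EVENT_PPS branch above turns a hardware pulse-per-second EQE into a PTP external-timestamp event: the raw cycle count carried in the EQE is converted to nanoseconds through the driver's timecounter and handed to the PTP core. A condensed sketch of that conversion; struct foo_clock and its fields are hypothetical stand-ins for the driver's timestamping state, while the PTP and timecounter APIs are the real kernel ones:

#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>

struct foo_clock {
    struct timecounter tc;   /* tracks the free-running HW counter */
    struct ptp_clock *ptp;   /* registered PTP clock */
};

static void foo_report_pps(struct foo_clock *fc, u8 pin, u64 raw_cycles)
{
    struct ptp_clock_event ev = {
        .type = PTP_CLOCK_EXTTS,
        .index = pin,
        /* free-running counter cycles -> nanoseconds */
        .timestamp = timecounter_cyc2time(&fc->tc, raw_cycles),
    };

    ptp_clock_event(fc->ptp, &ev);
}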
@@ -372,7 +413,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq, cseg->imm = rq->mkey_be; ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; - ucseg->klm_octowords = + ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); @@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->buff.map_dir = DMA_FROM_DEVICE; - if (rq->xdp_prog) + if (rq->xdp_prog) { rq->buff.map_dir = DMA_BIDIRECTIONAL; + rq->rx_headroom = XDP_PACKET_HEADROOM; + } else { + rq->buff.map_dir = DMA_FROM_DEVICE; + rq->rx_headroom = MLX5_RX_HEADROOM; + } switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, byte_count = rq->buff.wqe_sz; /* calc the required page order */ - frag_sz = MLX5_RX_HEADROOM + + frag_sz = rq->rx_headroom + byte_count /* packet data */ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); frag_sz = SKB_DATA_ALIGN(frag_sz); @@ -967,7 +1012,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->channel = c; sq->tc = tc; - err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); + err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false); if (err) return err; @@ -979,12 +1024,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - if (sq->uar.bf_map) { + if (sq->bfreg.wc) set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); - sq->uar_map = sq->uar.bf_map; - } else { - sq->uar_map = sq->uar.map; - } + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; sq->min_inline_mode = @@ -1012,7 +1054,7 @@ err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &sq->uar); + mlx5_free_bfreg(mdev, &sq->bfreg); return err; } @@ -1024,7 +1066,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); - mlx5_unmap_free_uar(priv->mdev, &sq->uar); + mlx5_free_bfreg(priv->mdev, &sq->bfreg); } static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) @@ -1058,7 +1100,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 
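With an XDP program attached, the receive buffer now reserves XDP_PACKET_HEADROOM (256 bytes) in front of the packet instead of the smaller default headroom, which is why the page-order computation above switched from the fixed MLX5_RX_HEADROOM to rq->rx_headroom. The same arithmetic in standalone form; the skb_shared_info size and the 64-byte alignment are stand-in values for this sketch:

#include <stdio.h>

#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x) ALIGN_UP((x), 64) /* SMP_CACHE_BYTES assumed 64 */
#define PAGE_SIZE         4096UL

/* sizeof(struct skb_shared_info) is arch dependent; 320 is a stand-in */
static unsigned long frag_sz(unsigned int headroom, unsigned int byte_count)
{
    unsigned long sz = headroom + byte_count + SKB_DATA_ALIGN(320);

    return SKB_DATA_ALIGN(sz);
}

int main(void)
{
    /* 256 = XDP_PACKET_HEADROOM, 1536 = a typical MTU-sized byte_count */
    unsigned long sz = frag_sz(256, 1536);
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < sz)
        order++;
    printf("frag_sz=%lu -> page order %u\n", sz, order);
    return 0;
}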
0 : 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, uar_page, sq->bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); @@ -1216,7 +1258,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -1265,7 +1306,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); - MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1472,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) +{ + return is_kdump_kernel() ? + MLX5E_MIN_NUM_CHANNELS : + min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) @@ -1677,7 +1726,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -2393,7 +2442,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &mdev->mlx5e_res.cq_uar; cq->priv = priv; @@ -2686,7 +2734,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -static struct rtnl_link_stats64 * +static void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2729,7 +2777,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); - return stats; } static void mlx5e_set_rx_mode(struct net_device *dev) @@ -2987,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - if (min_tx_rate) - return -EOPNOTSUPP; - return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, - max_tx_rate); + max_tx_rate, min_tx_rate); } static int mlx5_vport_link2ifla(u8 esw_link) @@ -3159,11 +3203,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bool reset, was_opened; int i; - if (prog && prog->xdp_adjust_head) { - netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - mutex_lock(&priv->state_lock); if ((netdev->features & NETIF_F_LRO) && prog) { @@ -3432,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; } -static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline_mode) -{ - switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5_CAP_INLINE_MODE_L2: - 
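The mlx5e_get_stats() hunk above is part of the tree-wide conversion of the ndo_get_stats64 callback from returning a stats pointer to returning void: the core now always supplies the buffer to fill, so the return value was redundant. After the change a driver callback has this shape; foo_priv and its counters are hypothetical:

#include <linux/netdevice.h>

struct foo_priv {
    u64 rx_packets;
    u64 tx_packets;
};

static void foo_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
    struct foo_priv *priv = netdev_priv(dev);

    stats->rx_packets = priv->rx_packets;
    stats->tx_packets = priv->tx_packets;
    /* nothing is returned anymore; the core owns 'stats' */
}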
*min_inline_mode = MLX5_INLINE_MODE_L2; - break; - case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); - break; - case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - *min_inline_mode = MLX5_INLINE_MODE_NONE; - break; - } -} - u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -3481,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); - priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_sq_size = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ priv->params.rx_cqe_compress_def = false; @@ -3507,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); + mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 850378893b25..2c864574a9d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -374,13 +374,12 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, return -EINVAL; } -static struct rtnl_link_stats64 * +static void mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); - return stats; } static const struct switchdev_ops mlx5e_rep_switchdev_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 06d5e6fecb0a..fd8dff6acc12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -33,6 +33,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <linux/bpf_trace.h> #include <net/busy_poll.h> #include "en.h" #include "en_tc.h" @@ -155,17 +156,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val) { bool was_opened; if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) return; - mutex_lock(&priv->state_lock); - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) - goto unlock; + return; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) @@ -176,8 +175,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) if (was_opened) mlx5e_open_locked(priv->netdev); -unlock: - mutex_unlock(&priv->state_lock); } #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) @@ -267,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) if (unlikely(mlx5e_page_alloc_mapped(rq, di))) return -ENOMEM; - wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM); + wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom); return 0; } @@ -647,10 +644,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) 
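Renaming mlx5e_modify_rx_cqe_compression() to the _locked variant and dropping the internal mutex_lock/unlock pair documents that the caller must already hold priv->state_lock. A caller that does not hold it would be expected to wrap the call like this; a sketch only, the real call sites live in the ethtool and priv-flags code:

static void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
{
    mutex_lock(&priv->state_lock);
    mlx5e_modify_rx_cqe_compression_locked(priv, val);
    mutex_unlock(&priv->state_lock);
}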
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } -static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, +static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, - unsigned int data_offset, - int len) + const struct xdp_buff *xdp) { struct mlx5e_sq *sq = &rq->channel->xdp_sq; struct mlx5_wq_cyc *wq = &sq->wq; @@ -662,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; + ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE; - unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE; - void *data = page_address(di->page) + data_offset; + unsigned int dma_len = xdp->data_end - xdp->data; + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || + MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { + rq->stats.xdp_drop++; + mlx5e_page_release(rq, di, true); + return false; + } if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { if (sq->db.xdp.doorbell) { @@ -674,16 +677,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, } rq->stats.xdp_tx_full++; mlx5e_page_release(rq, di, true); - return; + return false; } + dma_len -= MLX5E_XDP_MIN_INLINE; dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); memset(wqe, 0, sizeof(*wqe)); /* copy the inline part */ - memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE); eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1); @@ -703,32 +707,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.xdp.doorbell = true; rq->stats.xdp_tx++; + return true; } /* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - const struct bpf_prog *prog, - struct mlx5e_dma_info *di, - void *data, u16 len) +static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { + const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; if (!prog) return false; - xdp.data = data; - xdp.data_end = xdp.data + len; + xdp.data = va + *rx_headroom; + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + act = bpf_prog_run_xdp(prog, &xdp); switch (act) { case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; return false; case XDP_TX: - mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len); + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) + trace_xdp_exception(rq->netdev, prog, act); return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: rq->stats.xdp_drop++; mlx5e_page_release(rq, di, true); @@ -743,15 +754,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + u16 rx_headroom = rq->rx_headroom; bool consumed; di = &rq->dma_info[wqe_counter]; va = page_address(di->page); - data = va + MLX5_RX_HEADROOM; + data = va + rx_headroom; dma_sync_single_range_for_cpu(rq->pdev, di->addr, - MLX5_RX_HEADROOM, + rx_headroom, rq->buff.wqe_sz, DMA_FROM_DEVICE); prefetch(data); @@ -763,8 +775,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, - 
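Filling in xdp.data_hard_start and re-deriving *rx_headroom and *len on XDP_PASS is what makes bpf_xdp_adjust_head() usable on this driver (and why the earlier hunk could delete the explicit rejection of such programs): the program may move xdp->data, and the driver now builds the skb from the moved pointers. A toy XDP program that exercises it by reserving eight extra bytes in front of the frame; note the bpf/bpf_helpers.h include assumes today's libbpf layout, which this 2017 tree predates:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_prepend8(struct xdp_md *ctx)
{
    /* a negative delta grows the packet into the headroom */
    if (bpf_xdp_adjust_head(ctx, -8))
        return XDP_ABORTED;

    void *data = (void *)(long)ctx->data;
    void *data_end = (void *)(long)ctx->data_end;

    if (data + 8 > data_end) /* verifier-mandated bounds check */
        return XDP_ABORTED;
    __builtin_memset(data, 0, 8); /* stamp the new pseudo-header */
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";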
cqe_bcnt); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt); rcu_read_unlock(); if (consumed) return NULL; /* page/packet was consumed by XDP */ @@ -780,7 +791,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, page_ref_inc(di->page); mlx5e_page_release(rq, di, true); - skb_reserve(skb, MLX5_RX_HEADROOM); + skb_reserve(skb, rx_headroom); skb_put(skb, cqe_bcnt); return skb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index ba5db1dd23a9..53e4992d6511 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -39,7 +39,7 @@ #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ (*(u32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ - be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) @@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PHY_STATISTICAL_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_statistical_cntrs.c##_high) +#define PPORT_PHY_STATISTICAL_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ + counter_set.phys_layer_statistical_cntrs.c##_high) #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -215,6 +221,7 @@ struct mlx5e_pport_stats { __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = { { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, }; +static const struct counter_desc pport_phy_statistical_stats_desc[] = { + { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, +}; + static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c) + +struct mlx5e_pcie_stats { + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; +}; + +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS 
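The MLX5E_READ_CTR32_BE change above is a real width fix: be64_to_cpu() applied through a __be32 pointer reads eight bytes, so the old macro swallowed the neighbouring counter and byte-swapped the wrong width. A tiny standalone demonstration of the difference:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <arpa/inet.h>

int main(void)
{
    /* two adjacent 32-bit big-endian counters, as in a PPCNT layout */
    uint8_t buf[8] = { 0x00, 0x00, 0x00, 0x2a,   /* counter A = 42 */
                       0xde, 0xad, 0xbe, 0xef }; /* counter B */
    uint32_t a32;
    uint64_t wide;

    memcpy(&a32, buf, 4);
    printf("be32 read: %u\n", ntohl(a32)); /* 42: correct */

    memcpy(&wide, buf, 8); /* what the old be64_to_cpu() read did */
    printf("be64 read: 0x%llx\n",
           (unsigned long long)be64toh(wide)); /* 0x2adeadbeef: wrong */
    return 0;
}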
ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ + (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) +#define NUM_PCIE_PERF_COUNTERS(priv) \ + (ARRAY_SIZE(pcie_perf_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) +#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) @@ -378,6 +413,7 @@ struct mlx5e_stats { struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; + struct mlx5e_pcie_stats pcie; }; static const struct counter_desc mlx5e_pme_status_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index c5282b6aba8b..e3cf5f484153 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -298,6 +298,32 @@ vxlan_match_offload_err: MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } /* Enforce DMAC when offloading incoming tunneled flows. 
@@ -358,12 +384,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->key); switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: if (parse_tunnel_attr(priv, spec, f)) return -EOPNOTSUPP; break; - case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - netdev_warn(priv->netdev, - "IPv6 tunnel decap offload isn't supported\n"); default: return -EOPNOTSUPP; } @@ -460,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_VLAN, f->mask); if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); @@ -644,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } -static inline int cmp_encap_info(struct mlx5_encap_info *a, - struct mlx5_encap_info *b) +static inline int cmp_encap_info(struct ip_tunnel_key *a, + struct ip_tunnel_key *b) { return memcmp(a, b, sizeof(*a)); } -static inline int hash_encap_info(struct mlx5_encap_info *info) +static inline int hash_encap_info(struct ip_tunnel_key *key) { - return jhash(info, sizeof(*info), 0); + return jhash(key, sizeof(*key), 0); } static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, @@ -660,13 +684,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - __be32 *saddr, int *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct rtable *rt; struct neighbour *n = NULL; - int ttl; #if IS_ENABLED(CONFIG_INET) int ret; @@ -684,16 +706,54 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, else *out_dev = rt->dst.dev; - ttl = ip4_dst_hoplimit(&rt->dst); + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) return -ENOMEM; *out_n = n; - *saddr = fl4->saddr; - *out_ttl = ttl; + return 0; +} + +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + int *out_ttl) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + + dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); + if (dst->error) { + ret = dst->error; + dst_release(dst); + return ret; + } + + *out_ttl = ip6_dst_hoplimit(dst); + /* if the egress device isn't on the same HW e-switch, we use the uplink */ + if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) + *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + else + *out_dev = dst->dev; +#else + return -EOPNOTSUPP; +#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; return 0; } @@ -733,19 +793,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev, return encap_size; } +static int gen_vxlan_header_ipv6(struct net_device *out_dev, + char buf[], + unsigned char h_dest[ETH_ALEN], + int ttl, + struct in6_addr *daddr, + struct in6_addr *saddr, + __be16 udp_dst_port, + __be32 vx_vni) +{ + int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; + struct ethhdr *eth = (struct 
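Keying the encap cache on the full struct ip_tunnel_key (instead of the old three-field mlx5_encap_info) is what lets one hash table serve both address families: the key embeds a union of the IPv4 and IPv6 addresses plus tos/ttl, flow label, VNI and destination port. A sketch of the two helpers in isolation, kernel headers assumed:

#include <linux/jhash.h>
#include <linux/string.h>
#include <net/ip_tunnels.h>

/* hashing/memcmp'ing the whole struct relies on the key being fully
 * initialized (the tunnel code zeroes it), so padding holes do not
 * poison the comparison */
static u32 encap_key_hash(const struct ip_tunnel_key *key)
{
    return jhash(key, sizeof(*key), 0);
}

static bool encap_key_equal(const struct ip_tunnel_key *a,
                            const struct ip_tunnel_key *b)
{
    return !memcmp(a, b, sizeof(*a));
}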
ethhdr *)buf; + struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); + struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); + struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); + + memset(buf, 0, encap_size); + + ether_addr_copy(eth->h_dest, h_dest); + ether_addr_copy(eth->h_source, out_dev->dev_addr); + eth->h_proto = htons(ETH_P_IPV6); + + ip6_flow_hdr(ip6h, 0, 0); + /* the HW fills up ipv6 payload len */ + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + udp->dest = udp_dst_port; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vx_vni); + + return encap_size; +} + static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct mlx5_encap_entry *e, struct net_device **out_dev) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, ttl, err; struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; - int encap_size; - __be32 saddr; - int ttl; - int err; encap_header = kzalloc(max_encap_size, GFP_KERNEL); if (!encap_header) @@ -754,37 +847,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: fl4.flowi4_proto = IPPROTO_UDP; - fl4.fl4_dport = e->tun_info.tp_dst; + fl4.fl4_dport = tun_key->tp_dst; break; default: err = -EOPNOTSUPP; goto out; } - fl4.daddr = e->tun_info.daddr; + fl4.flowi4_tos = tun_key->tos; + fl4.daddr = tun_key->u.ipv4.dst; + fl4.saddr = tun_key->u.ipv4.src; err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, - &fl4, &n, &saddr, &ttl); + &fl4, &n, &ttl); if (err) goto out; + if (!(n->nud_state & NUD_VALID)) { + pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + err = -EOPNOTSUPP; + goto out; + } + e->n = n; e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + e->h_dest, ttl, + fl4.daddr, + fl4.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + encap_size, encap_header, &e->encap_id); +out: + if (err && n) + neigh_release(n); + kfree(encap_header); + return err; +} + +static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5_encap_entry *e, + struct net_device **out_dev) + +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + int encap_size, err, ttl = 0; + struct neighbour *n = NULL; + struct flowi6 fl6 = {}; + char *encap_header; + + encap_header = kzalloc(max_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = tun_key->tp_dst; + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + fl6.daddr = tun_key->u.ipv6.dst; + fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, + &fl6, &n, &ttl); + if (err) + goto out; + if (!(n->nud_state & NUD_VALID)) { - pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + 
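gen_vxlan_header_ipv6() lays the outer headers out back to back, exactly as the pointer chain above computes them: Ethernet, then a fixed 40-byte IPv6 header, then UDP, then VXLAN. Since VXLAN_HLEN already counts the UDP header, the encapsulation sizes work out as follows:

#include <stdio.h>

int main(void)
{
    const int eth = 14, ip4 = 20, ip6 = 40, udp = 8, vxlan = 8;
    const int vxlan_hlen = udp + vxlan; /* VXLAN_HLEN in the kernel */

    printf("IPv4 encap: %d bytes\n", vxlan_hlen + ip4 + eth); /* 50 */
    printf("IPv6 encap: %d bytes\n", vxlan_hlen + ip6 + eth); /* 70 */
    return 0;
}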
pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); err = -EOPNOTSUPP; goto out; } + e->n = n; + e->out_dev = *out_dev; + neigh_ha_snapshot(e->h_dest, n, *out_dev); switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: - encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, e->h_dest, ttl, - e->tun_info.daddr, - saddr, e->tun_info.tp_dst, - e->tun_info.tun_id); + &fl6.daddr, + &fl6.saddr, tun_key->tp_dst, + tunnel_id_to_key32(tun_key->tun_id)); break; default: err = -EOPNOTSUPP; @@ -808,13 +972,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); struct ip_tunnel_key *key = &tun_info->key; - struct mlx5_encap_info info; struct mlx5_encap_entry *e; struct net_device *out_dev; + int tunnel_type, err = -EOPNOTSUPP; uintptr_t hash_key; bool found = false; - int tunnel_type; - int err; /* udp dst port must be set */ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) @@ -830,8 +992,6 @@ vxlan_encap_offload_err: if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - info.tp_dst = key->tp_dst; - info.tun_id = tunnel_id_to_key32(key->tun_id); tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { netdev_warn(priv->netdev, @@ -839,22 +999,11 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - switch (family) { - case AF_INET: - info.daddr = key->u.ipv4.dst; - break; - case AF_INET6: - netdev_warn(priv->netdev, - "IPv6 tunnel encap offload isn't supported\n"); - default: - return -EOPNOTSUPP; - } - - hash_key = hash_encap_info(&info); + hash_key = hash_encap_info(key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if (!cmp_encap_info(&e->tun_info, &info)) { + if (!cmp_encap_info(&e->tun_info.key, key)) { found = true; break; } @@ -869,11 +1018,15 @@ vxlan_encap_offload_err: if (!e) return -ENOMEM; - e->tun_info = info; + e->tun_info = *tun_info; e->tunnel_type = tunnel_type; INIT_LIST_HEAD(&e->flows); - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + if (family == AF_INET) + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + else if (family == AF_INET6) + err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); + if (err) goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 8ffcc8808e50..ea5d8d37a75c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -54,6 +54,7 @@ enum { MLX5_NUM_SPARE_EQE = 0x80, MLX5_NUM_ASYNC_EQE = 0x100, MLX5_NUM_CMD_EQE = 32, + MLX5_NUM_PF_DRAIN = 64, }; enum { @@ -153,6 +154,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: return "MLX5_EVENT_TYPE_PAGE_FAULT"; + case MLX5_EVENT_TYPE_PPS_EVENT: + return "MLX5_EVENT_TYPE_PPS_EVENT"; default: return "Unrecognized event"; } @@ -188,10 +191,193 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm) mb(); } -static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +static void eqe_pf_action(struct work_struct *work) +{ + struct mlx5_pagefault *pfault = container_of(work, + struct mlx5_pagefault, + work); + struct mlx5_eq *eq = pfault->eq; + + mlx5_core_page_fault(eq->dev, pfault); + mempool_free(pfault, eq->pf_ctx.pool); +} + 
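The page-fault machinery that begins here never resolves a fault from hard-IRQ context: each EQE is copied into a mempool-backed descriptor and queued to an ordered workqueue, where the handler is free to sleep. The allocate-in-IRQ/free-in-work skeleton, reduced to its essentials; pf_desc, pf_pool and pf_wq are stand-ins for the mlx5 equivalents:

#include <linux/mempool.h>
#include <linux/workqueue.h>

struct pf_desc {
    struct work_struct work;
    u32 token; /* ... plus the other decoded EQE fields ... */
};

static mempool_t *pf_pool;
static struct workqueue_struct *pf_wq;

static void pf_work(struct work_struct *work)
{
    struct pf_desc *d = container_of(work, struct pf_desc, work);

    /* resolve the fault here; sleeping is fine in work context */
    mempool_free(d, pf_pool);
}

static void pf_eqe_seen(u32 token) /* called from the EQ interrupt */
{
    struct pf_desc *d = mempool_alloc(pf_pool, GFP_ATOMIC);

    if (!d)
        return; /* the real code schedules a refill-and-drain work */
    d->token = token;
    INIT_WORK(&d->work, pf_work);
    queue_work(pf_wq, &d->work);
}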
+static void eq_pf_process(struct mlx5_eq *eq) +{ + struct mlx5_core_dev *dev = eq->dev; + struct mlx5_eqe_page_fault *pf_eqe; + struct mlx5_pagefault *pfault; + struct mlx5_eqe *eqe; + int set_ci = 0; + + while ((eqe = next_eqe_sw(eq))) { + pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC); + if (!pfault) { + schedule_work(&eq->pf_ctx.work); + break; + } + + dma_rmb(); + pf_eqe = &eqe->data.page_fault; + pfault->event_subtype = eqe->sub_type; + pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); + + mlx5_core_dbg(dev, + "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", + eqe->sub_type, pfault->bytes_committed); + + switch (eqe->sub_type) { + case MLX5_PFAULT_SUBTYPE_RDMA: + /* RDMA based event */ + pfault->type = + be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->rdma.pftype_token) & + MLX5_24BIT_MASK; + pfault->rdma.r_key = + be32_to_cpu(pf_eqe->rdma.r_key); + pfault->rdma.packet_size = + be16_to_cpu(pf_eqe->rdma.packet_length); + pfault->rdma.rdma_op_len = + be32_to_cpu(pf_eqe->rdma.rdma_op_len); + pfault->rdma.rdma_va = + be64_to_cpu(pf_eqe->rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", + pfault->type, pfault->token, + pfault->rdma.r_key); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", + pfault->rdma.rdma_op_len, + pfault->rdma.rdma_va); + break; + + case MLX5_PFAULT_SUBTYPE_WQE: + /* WQE based event */ + pfault->type = + be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; + pfault->token = + be32_to_cpu(pf_eqe->wqe.token); + pfault->wqe.wq_num = + be32_to_cpu(pf_eqe->wqe.pftype_wq) & + MLX5_24BIT_MASK; + pfault->wqe.wqe_index = + be16_to_cpu(pf_eqe->wqe.wqe_index); + pfault->wqe.packet_size = + be16_to_cpu(pf_eqe->wqe.packet_length); + mlx5_core_dbg(dev, + "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", + pfault->type, pfault->token, + pfault->wqe.wq_num, + pfault->wqe.wqe_index); + break; + + default: + mlx5_core_warn(dev, + "Unsupported page fault event sub-type: 0x%02hhx\n", + eqe->sub_type); + /* Unsupported page faults should still be + * resolved by the page fault handler + */ + } + + pfault->eq = eq; + INIT_WORK(&pfault->work, eqe_pf_action); + queue_work(eq->pf_ctx.wq, &pfault->work); + + ++eq->cons_index; + ++set_ci; + + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); +} + +static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + unsigned long flags; + + if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) { + eq_pf_process(eq); + spin_unlock_irqrestore(&eq->pf_ctx.lock, flags); + } else { + schedule_work(&eq->pf_ctx.work); + } + + return IRQ_HANDLED; +} + +/* mempool_refill() was proposed but unfortunately wasn't accepted + * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html + * Cheap workaround.
+ */ +static void mempool_refill(mempool_t *pool) +{ + while (pool->curr_nr < pool->min_nr) + mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); +} + +static void eq_pf_action(struct work_struct *work) +{ + struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work); + + mempool_refill(eq->pf_ctx.pool); + + spin_lock_irq(&eq->pf_ctx.lock); + eq_pf_process(eq); + spin_unlock_irq(&eq->pf_ctx.lock); +} + +static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name) +{ + spin_lock_init(&pf_ctx->lock); + INIT_WORK(&pf_ctx->work, eq_pf_action); + + pf_ctx->wq = alloc_ordered_workqueue(name, + WQ_MEM_RECLAIM); + if (!pf_ctx->wq) + return -ENOMEM; + + pf_ctx->pool = mempool_create_kmalloc_pool + (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault)); + if (!pf_ctx->pool) + goto err_wq; + + return 0; +err_wq: + destroy_workqueue(pf_ctx->wq); + return -ENOMEM; +} + +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error) +{ + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; + + MLX5_SET(page_fault_resume_in, in, opcode, + MLX5_CMD_OP_PAGE_FAULT_RESUME); + MLX5_SET(page_fault_resume_in, in, error, !!error); + MLX5_SET(page_fault_resume_in, in, page_fault_type, type); + MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); + MLX5_SET(page_fault_resume_in, in, token, token); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); +#endif + +static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) { + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; struct mlx5_eqe *eqe; - int eqes_found = 0; int set_ci = 0; u32 cqn = -1; u32 rsn; @@ -276,12 +462,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } break; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - case MLX5_EVENT_TYPE_PAGE_FAULT: - mlx5_eq_pagefault(dev, eqe); - break; -#endif - #ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); @@ -292,6 +472,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_port_module_event(dev, eqe); break; + case MLX5_EVENT_TYPE_PPS_EVENT: + if (dev->event) + dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); + break; default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -299,7 +483,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } ++eq->cons_index; - eqes_found = 1; ++set_ci; /* The HCA will think the queue has overflowed if we @@ -319,17 +502,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (cqn != -1) tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; -} - -static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) -{ - struct mlx5_eq *eq = eq_ptr; - struct mlx5_core_dev *dev = eq->dev; - - mlx5_eq_int(dev, eq); - - /* MSI-X vectors always belong to us */ return IRQ_HANDLED; } @@ -345,22 +517,32 @@ static void init_eq_buf(struct mlx5_eq *eq) } int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar) + int nent, u64 mask, const char *name, + enum mlx5_eq_type type) { u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; + irq_handler_t handler; __be64 *pas; void *eqc; int inlen; u32 *in; int err; + eq->type = type; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 
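mlx5_eq_pf_int() above shows the companion locking rule: the hard-IRQ handler only trylocks the drain lock, and if the refill work already holds it, the handler leaves the EQ to be drained by that work instead of spinning with interrupts off. The pattern in the abstract; the foo_* names are hypothetical:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct foo {
    spinlock_t lock;
    struct work_struct work;
};

static void foo_process(struct foo *f); /* drains the event queue */

static irqreturn_t foo_irq(int irq, void *data)
{
    struct foo *f = data;
    unsigned long flags;

    if (spin_trylock_irqsave(&f->lock, flags)) {
        foo_process(f); /* fast path, runs in IRQ context */
        spin_unlock_irqrestore(&f->lock, flags);
    } else {
        schedule_work(&f->work); /* the lock holder will catch up */
    }
    return IRQ_HANDLED;
}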
0; err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) + handler = mlx5_eq_pf_int; + else +#endif + handler = mlx5_eq_int; + init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + @@ -380,7 +562,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); - MLX5_SET(eqc, eqc, uar_page, uar->index); + MLX5_SET(eqc, eqc, uar_page, priv->uar->index); MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, log_page_size, eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -395,8 +577,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(eq->irqn, mlx5_msix_handler, 0, + eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; + err = request_irq(eq->irqn, handler, 0, priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -405,11 +587,20 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, if (err) goto err_irq; - INIT_LIST_HEAD(&eq->tasklet_ctx.list); - INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); - spin_lock_init(&eq->tasklet_ctx.lock); - tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, - (unsigned long)&eq->tasklet_ctx); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (type == MLX5_EQ_TYPE_PF) { + err = init_pf_ctx(&eq->pf_ctx, name); + if (err) + goto err_irq; + } else +#endif + { + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + } /* EQs are created in ARMED state */ @@ -444,7 +635,16 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); synchronize_irq(eq->irqn); - tasklet_disable(&eq->tasklet_ctx.task); + + if (eq->type == MLX5_EQ_TYPE_COMP) { + tasklet_disable(&eq->tasklet_ctx.task); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + } else if (eq->type == MLX5_EQ_TYPE_PF) { + cancel_work_sync(&eq->pf_ctx.work); + destroy_workqueue(eq->pf_ctx.wq); + mempool_destroy(eq->pf_ctx.pool); +#endif + } mlx5_buf_free(dev, &eq->buf); return err; @@ -479,8 +679,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, pg)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && MLX5_CAP_GEN(dev, vport_group_manager) && @@ -492,9 +690,12 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); + if (MLX5_CAP_GEN(dev, pps)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); return err; @@ -504,7 +705,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", 
&dev->priv.uuari.uars[0]); + "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; @@ -514,13 +715,33 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) MLX5_EQ_VEC_PAGES, /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - &dev->priv.uuari.uars[0]); + MLX5_EQ_TYPE_ASYNC); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); goto err2; } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_create_map_eq(dev, &table->pfault_eq, + MLX5_EQ_VEC_PFAULT, + MLX5_NUM_ASYNC_EQE, + 1 << MLX5_EVENT_TYPE_PAGE_FAULT, + "mlx5_page_fault_eq", + MLX5_EQ_TYPE_PF); + if (err) { + mlx5_core_warn(dev, "failed to create page fault EQ %d\n", + err); + goto err3; + } + } + return err; +err3: + mlx5_destroy_unmap_eq(dev, &table->pages_eq); +#else + return err; +#endif err2: mlx5_destroy_unmap_eq(dev, &table->async_eq); @@ -536,6 +757,14 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; int err; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); + if (err) + return err; + } +#endif + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0c8bf014453..fcd5bc7e31db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); @@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); @@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); @@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->info.vlan || vport->info.qos) - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 
outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); if (vport->info.spoofchk) { MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); @@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); @@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) } static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, - u32 initial_max_rate) + u32 initial_max_rate, u32 initial_bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, initial_max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) } static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, - u32 max_rate) + u32 max_rate, u32 bw_share) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, esw->qos.root_tsar_id); MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_apply_vport_conf(esw, vport); /* Attach vport to the eswitch rate limiter */ - if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate)) + if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate, + vport->qos.bw_share)) esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); /* Sync with current vport context */ @@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; ivi->max_tx_rate = evport->info.max_rate; mutex_unlock(&esw->state_lock); @@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return 0; } -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32 max_rate) +static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) { + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); struct mlx5_vport *evport; + u32 max_guarantee = 0; + int i; + + for (i = 0; i < esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled ||
evport->info.min_rate < max_guarantee) + continue; + max_guarantee = evport->info.min_rate; + } + + return max_t(u32, max_guarantee / fw_max_bw_share, 1); +} + +static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 vport_max_rate; + u32 vport_min_rate; + u32 bw_share; + int err; + int i; + + for (i = 0; i < esw->total_vports; i++) { + evport = &esw->vports[i]; + if (!evport->enabled) + continue; + vport_min_rate = evport->info.min_rate; + vport_max_rate = evport->info.max_rate; + bw_share = MLX5_MIN_BW_SHARE; + + if (vport_min_rate) + bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, + divider, + fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_vport_qos_config(esw, i, vport_max_rate, + bw_share); + if (!err) + evport->qos.bw_share = bw_share; + else + return err; + } + + return 0; +} + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + struct mlx5_vport *evport; + u32 previous_min_rate; + u32 divider; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) + return -EOPNOTSUPP; mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - err = esw_vport_qos_config(esw, vport, max_rate); + + if (min_rate == evport->info.min_rate) + goto set_max_rate; + + previous_min_rate = evport->info.min_rate; + evport->info.min_rate = min_rate; + divider = calculate_vports_min_rate_divider(esw); + err = normalize_vports_min_rate(esw, divider); + if (err) { + evport->info.min_rate = previous_min_rate; + goto unlock; + } + +set_max_rate: + if (max_rate == evport->info.max_rate) + goto unlock; + + err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share); if (!err) evport->info.max_rate = max_rate; +unlock: mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 8661dd3f542c..5b78883d5654 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -36,6 +36,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> #include <net/devlink.h> +#include <net/ip_tunnels.h> #include <linux/mlx5/device.h> #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -49,6 +50,11 @@ #define FDB_UPLINK_VPORT 0xffff +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) + /* L2 -mac address based- hash helpers */ struct l2addr_node { struct hlist_node hlist; @@ -115,6 +121,7 @@ struct mlx5_vport_info { u8 qos; u64 node_guid; int link_state; + u32 min_rate; u32 max_rate; bool spoofchk; bool trusted; @@ -137,6 +144,7 @@ struct mlx5_vport { struct { bool enabled; u32 esw_tsar_ix; + u32 bw_share; } qos; bool enabled; @@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, - int vport, u32
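The min-rate plumbing above reduces to one piece of arithmetic: the largest configured guarantee defines a divider such that it maps to at most fw_max_bw_share, and every vport's guarantee is scaled by that same divider, clamped to the range [MLX5_MIN_BW_SHARE, fw_max_bw_share], which is exactly the MLX5_RATE_TO_BW_SHARE macro added to eswitch.h below. A worked example with illustrative numbers:

#include <stdio.h>

#define MIN_BW_SHARE 1

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
    /* three vports with guarantees of 10000, 2500 and 0 Mb/s, and a
     * firmware limit of 100 bw_share units */
    unsigned int rates[] = { 10000, 2500, 0 };
    unsigned int fw_max_bw_share = 100, max_guarantee = 10000;
    unsigned int divider = max_u(max_guarantee / fw_max_bw_share, 1);

    for (int i = 0; i < 3; i++) {
        unsigned int share = rates[i] ?
            min_u(max_u(rates[i] / divider, MIN_BW_SHARE),
                  fw_max_bw_share) : MIN_BW_SHARE;
        printf("min_rate %5u -> bw_share %3u\n", rates[i], share);
    }
    return 0; /* prints 100, 25 and 1 respectively */
}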
max_rate); +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, + u32 max_rate, u32 min_rate); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -274,18 +282,12 @@ enum { #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 -struct mlx5_encap_info { - __be32 daddr; - __be32 tun_id; - __be16 tp_dst; -}; - struct mlx5_encap_entry { struct hlist_node encap_hlist; struct list_head flows; u32 encap_id; struct neighbour *n; - struct mlx5_encap_info tun_info; + struct ip_tunnel_info tun_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 595f7c7383b3..4f5b0d47d5f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -402,19 +402,18 @@ out: } #define MAX_PF_SQ 256 -#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ #define ESW_OFFLOADS_NUM_GROUPS 4 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int table_size, ix, esw_size, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; struct mlx5_flow_group *g; u32 *flow_group_in; void *match_criteria; - int table_size, ix, err = 0; u32 flags = 0; flow_group_in = mlx5_vzalloc(inlen); @@ -428,15 +427,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), + MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + + esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, - ESW_OFFLOADS_NUM_ENTRIES, + esw_size, ESW_OFFLOADS_NUM_GROUPS, 0, flags); if (IS_ERR(fdb)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b53fc85a2375..b64a781c7e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, int err; u32 *in; - if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) + if (size > max_encap_size) { + mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", + size, max_encap_size); return -EINVAL; + } - in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6346a8f5883b..ce3d92106386 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1665,7 +1665,7 @@ static int 
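The FDB sizing change in eswitch_offloads.c follows the same capability-driven pattern: instead of a fixed 8K fast-path table, the table is sized to what the flow-counter pool can actually back, bounded by the device's maximum table size. In numbers, with illustrative capability values:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    /* illustrative values for max_flow_counter and log_max_ft_size */
    int max_flow_counter = 4096, num_groups = 4, log_max_ft_size = 15;
    int esw_size = min_int(max_flow_counter * num_groups,
                           1 << log_max_ft_size);

    printf("esw_size = %d\n", esw_size); /* min(16384, 32768) = 16384 */
    return 0;
}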
create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 5718aada6605..d0bbefa08af7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -91,6 +91,20 @@ out: } EXPORT_SYMBOL(mlx5_core_query_vendor_id); +static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_pcam_reg(dev, dev->caps.pcam, + MLX5_PCAM_FEATURE_ENHANCED_FEATURES, + MLX5_PCAM_REGS_5000_TO_507F); +} + +static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_mcam_reg(dev, dev->caps.mcam, + MLX5_MCAM_FEATURE_ENHANCED_FEATURES, + MLX5_MCAM_REGS_FIRST_128); +} + int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; @@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, pcam_reg)) + mlx5_get_pcam_reg(dev); + + if (MLX5_CAP_GEN(dev, mcam_reg)) + mlx5_get_mcam_reg(dev); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 5bcf93422ee0..d0515391d33b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd) } } -static u16 get_maj(u32 fw) -{ - return fw >> 28; -} - -static u16 get_min(u32 fw) -{ - return fw >> 16 & 0xfff; -} - -static u16 get_sub(u32 fw) -{ - return fw & 0xffff; -} - static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev) dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); - fw = ioread32be(&h->fw_ver); - sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw)); + sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + fw = ioread32be(&h->fw_ver); + dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3c315eb8d270..f7e50ba67f94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, + .mr_cache[16] = { + .size = 8, + .limit = 4 + }, + .mr_cache[17] = { + .size = 8, + .limit = 4 + }, + .mr_cache[18] = { + .size = 8, + .limit = 4 + }, + .mr_cache[19] = { + .size = 4, + .limit = 2 + }, + .mr_cache[20] = { + .size = 4, + .limit = 2 + }, }, }; @@ 
-398,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->hca_caps_max[cap_type], hca_caps, + memcpy(dev->caps.hca_max[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->hca_caps_cur[cap_type], hca_caps, + memcpy(dev->caps.hca_cur[cap_type], hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -493,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", @@ -517,6 +537,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + /* If the HCA supports 4K UARs use it */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); err = set_caps(dev, set_ctx, set_sz, @@ -739,7 +763,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, &dev->priv.uuari.uars[0]); + name, MLX5_EQ_TYPE_COMP); if (err) { kfree(eq); goto clean; @@ -899,8 +923,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } - MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); - err = mlx5_init_cq_table(dev); if (err) { dev_err(&pdev->dev, "failed to initialize cq table\n"); @@ -1079,8 +1101,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_cleanup_once; } - err = mlx5_alloc_uuars(dev, &priv->uuari); - if (err) { + dev->priv.uar = mlx5_get_uars_page(dev); + if (!dev->priv.uar) { dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); goto err_disable_msix; } @@ -1088,7 +1110,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_free_uar; + goto err_put_uars; } err = alloc_comp_eqs(dev); @@ -1154,8 +1176,8 @@ err_affinity_hints: err_stop_eqs: mlx5_stop_eqs(dev); -err_free_uar: - mlx5_free_uuars(dev, &priv->uuari); +err_put_uars: + mlx5_put_uars_page(dev, priv->uar); err_disable_msix: mlx5_disable_msix(dev); @@ -1218,7 +1240,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); - mlx5_free_uuars(dev, &priv->uuari); + mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) mlx5_cleanup_once(dev); @@ -1284,10 +1306,24 @@ static int init_one(struct pci_dev *pdev, spin_lock_init(&priv->ctx_lock); mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + err = init_srcu_struct(&priv->pfault_srcu); + if (err) { + dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n", + err); + goto clean_dev; + } +#endif + mutex_init(&priv->bfregs.reg_head.lock); + mutex_init(&priv->bfregs.wc_head.lock); + INIT_LIST_HEAD(&priv->bfregs.reg_head.list); + INIT_LIST_HEAD(&priv->bfregs.wc_head.list); + err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", 
err); - goto clean_dev; + goto clean_srcu; } err = mlx5_health_init(dev); @@ -1304,9 +1340,7 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } - err = request_module_nowait(MLX5_IB_MOD); - if (err) - pr_info("failed request module on %s\n", MLX5_IB_MOD); + request_module_nowait(MLX5_IB_MOD); err = devlink_register(devlink, &pdev->dev); if (err) @@ -1321,7 +1355,11 @@ clean_health: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); +clean_srcu: +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); @@ -1346,6 +1384,9 @@ static void remove_one(struct pci_dev *pdev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + cleanup_srcu_struct(&priv->pfault_srcu); +#endif pci_set_drvdata(pdev, NULL); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index d4a99c9757cb..b3dabe6e8836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -86,6 +86,8 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); +void mlx5_core_page_fault(struct mlx5_core_dev *dev, + struct mlx5_pagefault *pfault); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); @@ -111,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group); +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, + u8 access_reg_group); + void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); @@ -136,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); +int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); + void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fd12e0a377a5..141583daf5a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -74,6 +74,30 @@ out: } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); +int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, + u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(pcam_reg); + + MLX5_SET(pcam_reg, in, feature_group, feature_group); + MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0); +} + +int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group, + u8 
access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(mcam_reg); + + MLX5_SET(mcam_reg, in, feature_group, feature_group); + MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0); +} + struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; @@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) module_num, mlx5_pme_status[module_status - 1], mlx5_pme_error[error_type]); } + +int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out, + sizeof(out), MLX5_REG_MTPPS, 0, 1); +} + +int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + int err = 0; + + MLX5_SET(mtppse_reg, in, pin, pin); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 0); + if (err) + return err; + + *arm = MLX5_GET(mtppse_reg, in, event_arm); + *mode = MLX5_GET(mtppse_reg, in, event_generation_mode); + + return err; +} + +int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode) +{ + u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0}; + + MLX5_SET(mtppse_reg, in, pin, pin); + MLX5_SET(mtppse_reg, in, event_arm, arm); + MLX5_SET(mtppse_reg, in, event_generation_mode, mode); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MTPPSE, 0, 1); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index d0a4005fe63a..cbbcef2884be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -143,95 +143,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) mlx5_core_put_rsc(common); } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) -{ - struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; - int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; - struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); - struct mlx5_core_qp *qp = - container_of(common, struct mlx5_core_qp, common); - struct mlx5_pagefault pfault; - - if (!qp) { - mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", - qpn); - return; - } - - pfault.event_subtype = eqe->sub_type; - pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & - (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); - pfault.bytes_committed = be32_to_cpu( - pf_eqe->bytes_committed); - - mlx5_core_dbg(dev, - "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", - eqe->sub_type, pfault.flags); - - switch (eqe->sub_type) { - case MLX5_PFAULT_SUBTYPE_RDMA: - /* RDMA based event */ - pfault.rdma.r_key = - be32_to_cpu(pf_eqe->rdma.r_key); - pfault.rdma.packet_size = - be16_to_cpu(pf_eqe->rdma.packet_length); - pfault.rdma.rdma_op_len = - be32_to_cpu(pf_eqe->rdma.rdma_op_len); - pfault.rdma.rdma_va = - be64_to_cpu(pf_eqe->rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", - qpn, pfault.rdma.r_key); - 
mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_op_len: 0x%08x,\n", - pfault.rdma.rdma_op_len); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_va: 0x%016llx,\n", - pfault.rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - case MLX5_PFAULT_SUBTYPE_WQE: - /* WQE based event */ - pfault.wqe.wqe_index = - be16_to_cpu(pf_eqe->wqe.wqe_index); - pfault.wqe.packet_size = - be16_to_cpu(pf_eqe->wqe.packet_length); - mlx5_core_dbg(dev, - "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", - qpn, pfault.wqe.wqe_index); - mlx5_core_dbg(dev, - "PAGE_FAULT: bytes_committed: 0x%06x\n", - pfault.bytes_committed); - break; - - default: - mlx5_core_warn(dev, - "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", - eqe->sub_type, qpn); - /* Unsupported page faults should still be resolved by the - * page fault handler - */ - } - - if (qp->pfault_handler) { - qp->pfault_handler(qp, &pfault); - } else { - mlx5_core_err(dev, - "ODP event for QP %08x, without a fault handler in QP\n", - qpn); - /* Page fault will remain unresolved. QP will hang until it is - * destroyed - */ - } - - mlx5_core_put_rsc(common); -} -#endif - static int create_qprqsq_common(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int rsc_type) @@ -506,31 +417,6 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, - u8 flags, int error) -{ - u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; - - MLX5_SET(page_fault_resume_in, in, opcode, - MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, qpn, qpn); - - if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) - MLX5_SET(page_fault_resume_in, in, req_res, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) - MLX5_SET(page_fault_resume_in, in, read_write, 1); - if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) - MLX5_SET(page_fault_resume_in, in, rdma, 1); - if (error) - MLX5_SET(page_fault_resume_in, in, error, 1); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} -EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); -#endif - int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index ab0b896621a0..2e6b0f290ddc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -37,11 +37,6 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -enum { - NUM_DRIVER_UARS = 4, - NUM_LOW_LAT_UUARS = 4, -}; - int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; @@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) } EXPORT_SYMBOL(mlx5_cmd_free_uar); -static int need_uuar_lock(int uuarn) +static int uars_per_sys_page(struct mlx5_core_dev *mdev) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; - - if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) - return 0; + if (MLX5_CAP_GEN(mdev, uar_4k)) + return MLX5_CAP_GEN(mdev, num_of_uars_per_page); return 1; } -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index) { - int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; - struct mlx5_bf *bf; - phys_addr_t addr; - int err; + u32 
system_page_index; + + if (MLX5_CAP_GEN(mdev, uar_4k)) + system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT); + else + system_page_index = index; + + return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index; +} + +static void up_rel_func(struct kref *kref) +{ + struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); + + list_del(&up->list); + if (mlx5_cmd_free_uar(up->mdev, up->index)) + mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); + kfree(up->reg_bitmap); + kfree(up->fp_bitmap); + kfree(up); +} + +static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev, + bool map_wc) +{ + struct mlx5_uars_page *up; + int err = -ENOMEM; + phys_addr_t pfn; + int bfregs; int i; - uuari->num_uars = NUM_DRIVER_UARS; - uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR; + up = kzalloc(sizeof(*up), GFP_KERNEL); + if (!up) + return ERR_PTR(err); - mutex_init(&uuari->lock); - uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); - if (!uuari->uars) - return -ENOMEM; + up->mdev = mdev; + up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->reg_bitmap) + goto error1; - uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); - if (!uuari->bfs) { - err = -ENOMEM; - goto out_uars; - } + up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + if (!up->fp_bitmap) + goto error1; - uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), - GFP_KERNEL); - if (!uuari->bitmap) { - err = -ENOMEM; - goto out_bfs; - } + for (i = 0; i < bfregs; i++) + if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR) + set_bit(i, up->reg_bitmap); + else + set_bit(i, up->fp_bitmap); - uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); - if (!uuari->count) { - err = -ENOMEM; - goto out_bitmap; - } + up->bfregs = bfregs; + up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; + up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR; - for (i = 0; i < uuari->num_uars; i++) { - err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); - if (err) - goto out_count; + err = mlx5_cmd_alloc_uar(mdev, &up->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + goto error1; + } - addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); - uuari->uars[i].map = ioremap(addr, PAGE_SIZE); - if (!uuari->uars[i].map) { - mlx5_cmd_free_uar(dev, uuari->uars[i].index); + pfn = uar2pfn(mdev, up->index); + if (map_wc) { + up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { + err = -EAGAIN; + goto error2; + } + } else { + up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!up->map) { err = -ENOMEM; - goto out_count; + goto error2; } - mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", - uuari->uars[i].index, uuari->uars[i].map); - } - - for (i = 0; i < tot_uuars; i++) { - bf = &uuari->bfs[i]; - - bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; - bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; - bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; - bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * - (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + - MLX5_BF_OFFSET; - bf->need_lock = need_uuar_lock(i); - spin_lock_init(&bf->lock); - spin_lock_init(&bf->lock32); - bf->uuarn = i; } + 
kref_init(&up->ref_count); + mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n", + up->index, up->bfregs); + return up; + +error2: + if (mlx5_cmd_free_uar(mdev, up->index)) + mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index); +error1: + kfree(up->fp_bitmap); + kfree(up->reg_bitmap); + kfree(up); + return ERR_PTR(err); +} - return 0; - -out_count: - for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev) +{ + struct mlx5_uars_page *ret; + + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + if (list_empty(&mdev->priv.bfregs.reg_head.list)) { + ret = alloc_uars_page(mdev, false); + if (IS_ERR(ret)) { + ret = NULL; + goto out; + } + list_add(&ret->list, &mdev->priv.bfregs.reg_head.list); + } else { + ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, + struct mlx5_uars_page, list); + kref_get(&ret->ref_count); } - kfree(uuari->count); +out: + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); -out_bitmap: - kfree(uuari->bitmap); - -out_bfs: - kfree(uuari->bfs); + return ret; +} +EXPORT_SYMBOL(mlx5_get_uars_page); -out_uars: - kfree(uuari->uars); - return err; +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up) +{ + mutex_lock(&mdev->priv.bfregs.reg_head.lock); + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(&mdev->priv.bfregs.reg_head.lock); } +EXPORT_SYMBOL(mlx5_put_uars_page); -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi) { - int i = uuari->num_uars; + /* return the offset in bytes from the start of the page to the + * blue flame area of the UAR + */ + return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE + + (dbi % MLX5_BFREGS_PER_UAR) * + (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET; +} - for (i--; i >= 0; i--) { - iounmap(uuari->uars[i].map); - mlx5_cmd_free_uar(dev, uuari->uars[i].index); +static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) +{ + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct list_head *head; + unsigned long *bitmap; + unsigned int *avail; + struct mutex *lock; /* pointer to right mutex */ + int dbi; + + bfregs = &mdev->priv.bfregs; + if (map_wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; } - - kfree(uuari->count); - kfree(uuari->bitmap); - kfree(uuari->bfs); - kfree(uuari->uars); + mutex_lock(lock); + if (list_empty(head)) { + up = alloc_uars_page(mdev, map_wc); + if (IS_ERR(up)) { + mutex_unlock(lock); + return PTR_ERR(up); + } + list_add(&up->list, head); + } else { + up = list_entry(head->next, struct mlx5_uars_page, list); + kref_get(&up->ref_count); + } + if (fast_path) { + bitmap = up->fp_bitmap; + avail = &up->fp_avail; + } else { + bitmap = up->reg_bitmap; + avail = &up->reg_avail; + } + dbi = find_first_bit(bitmap, up->bfregs); + clear_bit(dbi, bitmap); + (*avail)--; + if (!(*avail)) + list_del(&up->list); + + bfreg->map = up->map + map_offset(mdev, dbi); + bfreg->up = up; + bfreg->wc = map_wc; + bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR; + mutex_unlock(lock); return 0; } -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, - bool map_wc) +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path) 
{ - phys_addr_t pfn; - phys_addr_t uar_bar_start; int err; - err = mlx5_cmd_alloc_uar(mdev, &uar->index); - if (err) { - mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); - return err; - } + err = alloc_bfreg(mdev, bfreg, map_wc, fast_path); + if (!err) + return 0; - uar_bar_start = pci_resource_start(mdev->pdev, 0); - pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; + if (err == -EAGAIN && map_wc) + return alloc_bfreg(mdev, bfreg, false, fast_path); - if (map_wc) { - uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->bf_map) { - mlx5_core_warn(mdev, "ioremap_wc() failed\n"); - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } - } else { - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) - goto err_free_uar; - } + return err; +} +EXPORT_SYMBOL(mlx5_alloc_bfreg); - return 0; +static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev, + struct mlx5_uars_page *up, + struct mlx5_sq_bfreg *bfreg) +{ + unsigned int uar_idx; + unsigned int bfreg_idx; + unsigned int bf_reg_size; -err_free_uar: - mlx5_core_warn(mdev, "ioremap() failed\n"); - err = -ENOMEM; - mlx5_cmd_free_uar(mdev, uar->index); + bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size); - return err; + uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT; + bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size; + + return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx; } -EXPORT_SYMBOL(mlx5_alloc_map_uar); -void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg) { - if (uar->map) - iounmap(uar->map); - else - iounmap(uar->bf_map); - mlx5_cmd_free_uar(mdev, uar->index); + struct mlx5_bfreg_data *bfregs; + struct mlx5_uars_page *up; + struct mutex *lock; /* pointer to right mutex */ + unsigned int dbi; + bool fp; + unsigned int *avail; + unsigned long *bitmap; + struct list_head *head; + + bfregs = &mdev->priv.bfregs; + if (bfreg->wc) { + head = &bfregs->wc_head.list; + lock = &bfregs->wc_head.lock; + } else { + head = &bfregs->reg_head.list; + lock = &bfregs->reg_head.lock; + } + up = bfreg->up; + dbi = addr_to_dbi_in_syspage(mdev, up, bfreg); + fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR; + if (fp) { + avail = &up->fp_avail; + bitmap = up->fp_bitmap; + } else { + avail = &up->reg_avail; + bitmap = up->reg_bitmap; + } + mutex_lock(lock); + (*avail)++; + set_bit(dbi, bitmap); + if (*avail == 1) + list_add_tail(&up->list, head); + + kref_put(&up->ref_count, up_rel_func); + mutex_unlock(lock); } -EXPORT_SYMBOL(mlx5_unmap_free_uar); +EXPORT_SYMBOL(mlx5_free_bfreg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 7129c30a2ab4..15c2294dd2b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); + break; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} 
+EXPORT_SYMBOL_GPL(mlx5_query_min_inline); + int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 16f44b9aa076..76a7574c3c7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -73,6 +73,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q + select PARMAN default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index fe8dadba15ab..6b6c30deee83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o -mlxsw_core-objs := core.o +mlxsw_core-objs := core.o core_acl_flex_keys.o \ + core_acl_flex_actions.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o @@ -13,7 +14,8 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o + spectrum_kvdl.o spectrum_acl_tcam.o \ + spectrum_acl.o spectrum_flower.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 56e19b0d2f8f..a1b48421648a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1132,12 +1132,12 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); -/* cmd_mbox_sw2hw_eq_int_oi +/* cmd_mbox_sw2hw_eq_oi * When set, overrun ignore is enabled. */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); -/* cmd_mbox_sw2hw_eq_int_st +/* cmd_mbox_sw2hw_eq_st * Event delivery state machine * 0x0 - FIRED * 0x1 - ARMED (Request for Notification) @@ -1146,19 +1146,19 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2); -/* cmd_mbox_sw2hw_eq_int_log_eq_size +/* cmd_mbox_sw2hw_eq_log_eq_size * Log (base 2) of the EQ size (in entries). */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4); -/* cmd_mbox_sw2hw_eq_int_producer_counter +/* cmd_mbox_sw2hw_eq_producer_counter * Producer Counter. The counter is incremented for each EQE that is written * by the HW to the EQ. * Maintained by HW (valid for the QUERY_EQ command only) */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16); -/* cmd_mbox_sw2hw_eq_int_pa +/* cmd_mbox_sw2hw_eq_pa * Physical Address. */ MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c new file mode 100644 index 000000000000..34e2fefb0a25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -0,0 +1,685 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/rhashtable.h> +#include <linux/list.h> + +#include "item.h" +#include "core_acl_flex_actions.h" + +enum mlxsw_afa_set_type { + MLXSW_AFA_SET_TYPE_NEXT, + MLXSW_AFA_SET_TYPE_GOTO, +}; + +/* afa_set_type + * Type of the record at the end of the action set. + */ +MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4); + +/* afa_set_next_action_set_ptr + * A pointer to the next action set in the KVD Centralized database. + */ +MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24); + +/* afa_set_goto_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + */ +MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1); + +enum mlxsw_afa_set_goto_binding_cmd { + /* continue go the next binding point */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, + /* jump to the next binding point no return */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, + /* terminate the acl binding */ + MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4, +}; + +/* afa_set_goto_binding_cmd */ +MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3); + +/* afa_set_goto_next_binding + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + */ +MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16); + +/* afa_all_action_type + * Action Type. 
+ */ +MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6); + +struct mlxsw_afa { + unsigned int max_acts_per_set; + const struct mlxsw_afa_ops *ops; + void *ops_priv; + struct rhashtable set_ht; + struct rhashtable fwd_entry_ht; +}; + +#define MLXSW_AFA_SET_LEN 0xA8 + +struct mlxsw_afa_set_ht_key { + char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */ + bool is_first; +}; + +/* Set structure holds one action set record. It contains up to three + * actions (depends on size of particular actions). The set is either + * put directly to a rule, or it is stored in KVD linear area. + * To prevent duplicate entries in KVD linear area, a hashtable is + * used to track sets that were previously inserted and may be shared. + */ + +struct mlxsw_afa_set { + struct rhash_head ht_node; + struct mlxsw_afa_set_ht_key ht_key; + u32 kvdl_index; + bool shared; /* Inserted in hashtable (doesn't mean that + * kvdl_index is valid). + */ + unsigned int ref_count; + struct mlxsw_afa_set *next; /* Pointer to the next set. */ + struct mlxsw_afa_set *prev; /* Pointer to the previous set, + * note that set may have multiple + * sets from multiple blocks + * pointing at it. This is only + * usable until commit. + */ +}; + +static const struct rhashtable_params mlxsw_afa_set_ht_params = { + .key_len = sizeof(struct mlxsw_afa_set_ht_key), + .key_offset = offsetof(struct mlxsw_afa_set, ht_key), + .head_offset = offsetof(struct mlxsw_afa_set, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa_fwd_entry_ht_key { + u8 local_port; +}; + +struct mlxsw_afa_fwd_entry { + struct rhash_head ht_node; + struct mlxsw_afa_fwd_entry_ht_key ht_key; + u32 kvdl_index; + unsigned int ref_count; +}; + +static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { + .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key), + .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key), + .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node), + .automatic_shrinking = true, +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv) +{ + struct mlxsw_afa *mlxsw_afa; + int err; + + mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL); + if (!mlxsw_afa) + return ERR_PTR(-ENOMEM); + err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params); + if (err) + goto err_set_rhashtable_init; + err = rhashtable_init(&mlxsw_afa->fwd_entry_ht, + &mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_fwd_entry_rhashtable_init; + mlxsw_afa->max_acts_per_set = max_acts_per_set; + mlxsw_afa->ops = ops; + mlxsw_afa->ops_priv = ops_priv; + return mlxsw_afa; + +err_fwd_entry_rhashtable_init: + rhashtable_destroy(&mlxsw_afa->set_ht); +err_set_rhashtable_init: + kfree(mlxsw_afa); + return ERR_PTR(err); +} +EXPORT_SYMBOL(mlxsw_afa_create); + +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa) +{ + rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); + rhashtable_destroy(&mlxsw_afa->set_ht); + kfree(mlxsw_afa); +} +EXPORT_SYMBOL(mlxsw_afa_destroy); + +static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set, + enum mlxsw_afa_set_goto_binding_cmd cmd, + u16 group_id) +{ + char *actions = set->ht_key.enc_actions; + + mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO); + mlxsw_afa_set_goto_g_set(actions, true); + mlxsw_afa_set_goto_binding_cmd_set(actions, cmd); + mlxsw_afa_set_goto_next_binding_set(actions, group_id); +} + +static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set, + u32 next_set_kvdl_index) +{ + char *actions = set->ht_key.enc_actions; + + 
mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT); + mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first) +{ + struct mlxsw_afa_set *set; + + set = kzalloc(sizeof(*set), GFP_KERNEL); + if (!set) + return NULL; + /* Need to initialize the set to pass by default */ + mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); + set->ht_key.is_first = is_first; + set->ref_count = 1; + return set; +} + +static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set) +{ + kfree(set); +} + +static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + int err; + + err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + if (err) + return err; + err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv, + &set->kvdl_index, + set->ht_key.enc_actions, + set->ht_key.is_first); + if (err) + goto err_kvdl_set_add; + set->shared = true; + set->prev = NULL; + return 0; + +err_kvdl_set_add: + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + return err; +} + +static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv, + set->kvdl_index, + set->ht_key.is_first); + rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node, + mlxsw_afa_set_ht_params); + set->shared = false; +} + +static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *set) +{ + if (--set->ref_count) + return; + if (set->shared) + mlxsw_afa_set_unshare(mlxsw_afa, set); + mlxsw_afa_set_destroy(set); +} + +static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_set *orig_set) +{ + struct mlxsw_afa_set *set; + int err; + + /* There is a hashtable of sets maintained. If a set with the exact + * same encoding exists, we reuse it. Otherwise, the current set + * is shared by making it available to others using the hash table. + */ + set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, + mlxsw_afa_set_ht_params); + if (set) { + set->ref_count++; + mlxsw_afa_set_put(mlxsw_afa, orig_set); + } else { + set = orig_set; + err = mlxsw_afa_set_share(mlxsw_afa, set); + if (err) + return ERR_PTR(err); + } + return set; +} + +/* Block structure holds a list of action sets. One action block + * represents one chain of actions executed upon match of a rule. + */ + +struct mlxsw_afa_block { + struct mlxsw_afa *afa; + bool finished; + struct mlxsw_afa_set *first_set; + struct mlxsw_afa_set *cur_set; + unsigned int cur_act_index; /* In current set. 
*/ + struct list_head fwd_entry_ref_list; +}; + +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) +{ + struct mlxsw_afa_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + INIT_LIST_HEAD(&block->fwd_entry_ref_list); + block->afa = mlxsw_afa; + + /* At least one action set is always present, so just create it here */ + block->first_set = mlxsw_afa_set_create(true); + if (!block->first_set) + goto err_first_set_create; + block->cur_set = block->first_set; + return block; + +err_first_set_create: + kfree(block); + return NULL; +} +EXPORT_SYMBOL(mlxsw_afa_block_create); + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block); + +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->first_set; + struct mlxsw_afa_set *next_set; + + do { + next_set = set->next; + mlxsw_afa_set_put(block->afa, set); + set = next_set; + } while (set); + mlxsw_afa_fwd_entry_refs_destroy(block); + kfree(block); +} +EXPORT_SYMBOL(mlxsw_afa_block_destroy); + +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_set *set = block->cur_set; + struct mlxsw_afa_set *prev_set; + int err; + + block->cur_set = NULL; + + /* Go over all linked sets starting from last + * and try to find existing set in the hash table. + * In case it is not there, assign a KVD linear index + * and insert it. + */ + do { + prev_set = set->prev; + set = mlxsw_afa_set_get(block->afa, set); + if (IS_ERR(set)) { + err = PTR_ERR(set); + goto rollback; + } + if (prev_set) { + prev_set->next = set; + mlxsw_afa_set_next_set(prev_set, set->kvdl_index); + set = prev_set; + } + } while (prev_set); + + block->first_set = set; + block->finished = true; + return 0; + +rollback: + while ((set = set->next)) + mlxsw_afa_set_put(block->afa, set); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_commit); + +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) +{ + return block->first_set->ht_key.enc_actions; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set); + +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +{ + return block->first_set->kvdl_index; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); + +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_continue); + +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) +{ + if (WARN_ON(block->finished)) + return; + mlxsw_afa_set_goto_set(block->cur_set, + MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id); + block->finished = true; +} +EXPORT_SYMBOL(mlxsw_afa_block_jump); + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry *fwd_entry; + int err; + + fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL); + if (!fwd_entry) + return ERR_PTR(-ENOMEM); + fwd_entry->ht_key.local_port = local_port; + fwd_entry->ref_count = 1; + + err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, + &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv, + &fwd_entry->kvdl_index, + local_port); + if (err) + goto err_kvdl_fwd_entry_add; + return fwd_entry; + +err_kvdl_fwd_entry_add: + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, 
&fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); +err_rhashtable_insert: + kfree(fwd_entry); + return ERR_PTR(err); +} + +static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv, + fwd_entry->kvdl_index); + rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, + mlxsw_afa_fwd_entry_ht_params); + kfree(fwd_entry); +} + +static struct mlxsw_afa_fwd_entry * +mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; + struct mlxsw_afa_fwd_entry *fwd_entry; + + ht_key.local_port = local_port; + fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, + mlxsw_afa_fwd_entry_ht_params); + if (fwd_entry) { + fwd_entry->ref_count++; + return fwd_entry; + } + return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); +} + +static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_fwd_entry *fwd_entry) +{ + if (--fwd_entry->ref_count) + return; + mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); +} + +struct mlxsw_afa_fwd_entry_ref { + struct list_head list; + struct mlxsw_afa_fwd_entry *fwd_entry; +}; + +static struct mlxsw_afa_fwd_entry_ref * +mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry *fwd_entry; + int err; + + fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL); + if (!fwd_entry_ref) + return ERR_PTR(-ENOMEM); + fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port); + if (IS_ERR(fwd_entry)) { + err = PTR_ERR(fwd_entry); + goto err_fwd_entry_get; + } + fwd_entry_ref->fwd_entry = fwd_entry; + list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list); + return fwd_entry_ref; + +err_fwd_entry_get: + kfree(fwd_entry_ref); + return ERR_PTR(err); +} + +static void +mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) +{ + list_del(&fwd_entry_ref->list); + mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); + kfree(fwd_entry_ref); +} + +static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + struct mlxsw_afa_fwd_entry_ref *tmp; + + list_for_each_entry_safe(fwd_entry_ref, tmp, + &block->fwd_entry_ref_list, list) + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); +} + +#define MLXSW_AFA_ONE_ACTION_LEN 32 +#define MLXSW_AFA_PAYLOAD_OFFSET 4 + +static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size) +{ + char *oneact; + char *actions; + + if (WARN_ON(block->finished)) + return NULL; + if (block->cur_act_index + action_size > + block->afa->max_acts_per_set) { + struct mlxsw_afa_set *set; + + /* The appended action won't fit into the current action set, + * so create a new set. 
+ */ + set = mlxsw_afa_set_create(false); + if (!set) + return NULL; + set->prev = block->cur_set; + block->cur_act_index = 0; + block->cur_set->next = set; + block->cur_set = set; + } + + actions = block->cur_set->ht_key.enc_actions; + oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN; + block->cur_act_index += action_size; + mlxsw_afa_all_action_type_set(oneact, action_code); + return oneact + MLXSW_AFA_PAYLOAD_OFFSET; +} + +/* Trap / Discard Action + * --------------------- + * The Trap / Discard action enables trapping / mirroring packets to the CPU + * as well as discarding packets. + * The ACL Trap / Discard separates the forward/discard control from CPU + * trap control. In addition, the Trap / Discard action enables activating + * SPAN (port mirroring). + */ + +#define MLXSW_AFA_TRAPDISC_CODE 0x03 +#define MLXSW_AFA_TRAPDISC_SIZE 1 + +enum mlxsw_afa_trapdisc_forward_action { + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3, +}; + +/* afa_trapdisc_forward_action + * Forward Action. + */ +MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4); + +static inline void +mlxsw_afa_trapdisc_pack(char *payload, + enum mlxsw_afa_trapdisc_forward_action forward_action) +{ + mlxsw_afa_trapdisc_forward_action_set(payload, forward_action); +} + +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_TRAPDISC_CODE, + MLXSW_AFA_TRAPDISC_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_drop); + +/* Forwarding Action + * ----------------- + * Forwarding Action can be used to implement Policy Based Switching (PBS) + * as well as OpenFlow related "Output" action. + */ + +#define MLXSW_AFA_FORWARD_CODE 0x07 +#define MLXSW_AFA_FORWARD_SIZE 1 + +enum mlxsw_afa_forward_type { + /* PBS, Policy Based Switching */ + MLXSW_AFA_FORWARD_TYPE_PBS, + /* Output, OpenFlow output type */ + MLXSW_AFA_FORWARD_TYPE_OUTPUT, +}; + +/* afa_forward_type */ +MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2); + +/* afa_forward_pbs_ptr + * A pointer to the PBS entry configured by PPBS register. + * Reserved when in_port is set. + */ +MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24); + +/* afa_forward_in_port + * Packet is forwarded back to the ingress port. 
+ */ +MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1); + +static inline void +mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, + u32 pbs_ptr, bool in_port) +{ + mlxsw_afa_forward_type_set(payload, type); + mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr); + mlxsw_afa_forward_in_port_set(payload, in_port); +} + +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + u32 kvdl_index = 0; + char *act; + int err; + + if (!in_port) { + fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, + local_port); + if (IS_ERR(fwd_entry_ref)) + return PTR_ERR(fwd_entry_ref); + kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; + } + + act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, + MLXSW_AFA_FORWARD_SIZE); + if (!act) { + err = -ENOBUFS; + goto err_append_action; + } + mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_OUTPUT, + kvdl_index, in_port); + return 0; + +err_append_action: + if (!in_port) + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h new file mode 100644 index 000000000000..43f78dcfe394 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -0,0 +1,66 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H +#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H + +#include <linux/types.h> + +struct mlxsw_afa; +struct mlxsw_afa_block; + +struct mlxsw_afa_ops { + int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first); + void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); + void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); +}; + +struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, + const struct mlxsw_afa_ops *ops, + void *ops_priv); +void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa); +struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); +void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); +int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); +void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); +int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, + u8 local_port, bool in_port); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c new file mode 100644 index 000000000000..b32a00972e83 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -0,0 +1,475 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/errno.h> + +#include "item.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_afk { + struct list_head key_info_list; + unsigned int max_blocks; + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; +}; + +static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) +{ + int i; + int j; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->type != elinst->info->type || + elinst->item.size.bits != + elinst->info->item.size.bits) + return false; + } + } + return true; +} + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count) +{ + struct mlxsw_afk *mlxsw_afk; + + mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL); + if (!mlxsw_afk) + return NULL; + INIT_LIST_HEAD(&mlxsw_afk->key_info_list); + mlxsw_afk->max_blocks = max_blocks; + mlxsw_afk->blocks = blocks; + mlxsw_afk->blocks_count = blocks_count; + WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); + return mlxsw_afk; +} +EXPORT_SYMBOL(mlxsw_afk_create); + +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk) +{ + WARN_ON(!list_empty(&mlxsw_afk->key_info_list)); + kfree(mlxsw_afk); +} +EXPORT_SYMBOL(mlxsw_afk_destroy); + +struct mlxsw_afk_key_info { + struct list_head list; + unsigned int ref_count; + unsigned int blocks_count; + int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value + * is index inside "blocks" + */ + struct mlxsw_afk_element_usage elusage; + const struct mlxsw_afk_block *blocks[0]; +}; + +static bool +mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) { + if (mlxsw_afk_key_info_elements_eq(key_info, elusage)) + return key_info; + } + return NULL; +} + +struct mlxsw_afk_picker { + struct { + DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + unsigned int total; + } hits[0]; +}; + +static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + enum mlxsw_afk_element element) +{ + int i; + int j; + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i]; + + for (j = 0; j < block->instances_count; j++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[j]; + if (elinst->info->element == element) { + __set_bit(element, picker->hits[i].element); + picker->hits[i].total++; + } + } + } +} + +static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_picker *picker, + int block_index) +{ + DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX); + int i; + int j; + + memcpy(&hits_element, &picker->hits[block_index].element, + sizeof(hits_element)); + + for (i = 0; i < mlxsw_afk->blocks_count; i++) { + for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) { + if (__test_and_clear_bit(j, picker->hits[i].element)) + picker->hits[i].total--; + } + } +} + +static int mlxsw_afk_picker_most_hits_get(struct 
mlxsw_afk *mlxsw_afk,
+					  struct mlxsw_afk_picker *picker)
+{
+	int most_index = -EINVAL; /* Should never be returned */
+	int most_hits = 0;
+	int i;
+
+	for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+		if (picker->hits[i].total > most_hits) {
+			most_hits = picker->hits[i].total;
+			most_index = i;
+		}
+	}
+	return most_index;
+}
+
+static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
+					 struct mlxsw_afk_picker *picker,
+					 int block_index,
+					 struct mlxsw_afk_key_info *key_info)
+{
+	enum mlxsw_afk_element element;
+
+	if (key_info->blocks_count == mlxsw_afk->max_blocks)
+		return -EINVAL;
+
+	for_each_set_bit(element, picker->hits[block_index].element,
+			 MLXSW_AFK_ELEMENT_MAX) {
+		key_info->element_to_block[element] = key_info->blocks_count;
+		mlxsw_afk_element_usage_add(&key_info->elusage, element);
+	}
+
+	key_info->blocks[key_info->blocks_count] =
+		&mlxsw_afk->blocks[block_index];
+	key_info->blocks_count++;
+	return 0;
+}
+
+static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
+			    struct mlxsw_afk_key_info *key_info,
+			    struct mlxsw_afk_element_usage *elusage)
+{
+	struct mlxsw_afk_picker *picker;
+	enum mlxsw_afk_element element;
+	size_t alloc_size;
+	int err;
+
+	alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count;
+	picker = kzalloc(alloc_size, GFP_KERNEL);
+	if (!picker)
+		return -ENOMEM;
+
+	/* Since the same elements could be present in multiple blocks,
+	 * we must find an optimal block list in order to keep the
+	 * block count as low as possible.
+	 *
+	 * First, we count hits. We go over all available blocks and count
+	 * how many of the requested elements are covered by each.
+	 *
+	 * Then, in a loop, we find the block with the most hits and add it
+	 * to the output key_info. Then we subtract this block's hits so
+	 * the next iteration will find the most suitable block for
+	 * the rest of the requested elements.
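+	 *
+	 * For example (the block contents here are made up purely for
+	 * illustration): if the caller requests {DMAC, ETHERTYPE, IP_PROTO},
+	 * block A covers {DMAC, ETHERTYPE} and block B covers
+	 * {ETHERTYPE, IP_PROTO}, then one of the two-hit blocks is picked
+	 * first, its elements are subtracted so the other block is left
+	 * with a single hit, and that block is picked next, giving a
+	 * two-block key that covers all three elements.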
+ */ + + mlxsw_afk_element_usage_for_each(element, elusage) + mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element); + + do { + int block_index; + + block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker); + if (block_index < 0) { + err = block_index; + goto out; + } + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, + block_index, key_info); + if (err) + goto out; + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); + } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); + + err = 0; +out: + kfree(picker); + return err; +} + +static struct mlxsw_afk_key_info * +mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + size_t alloc_size; + int err; + + alloc_size = sizeof(*key_info) + + sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks; + key_info = kzalloc(alloc_size, GFP_KERNEL); + if (!key_info) + return ERR_PTR(-ENOMEM); + err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage); + if (err) + goto err_picker; + list_add(&key_info->list, &mlxsw_afk->key_info_list); + key_info->ref_count = 1; + return key_info; + +err_picker: + kfree(key_info); + return ERR_PTR(err); +} + +static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info) +{ + list_del(&key_info->list); + kfree(key_info); +} + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk_key_info *key_info; + + key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); + if (key_info) { + key_info->ref_count++; + return key_info; + } + return mlxsw_afk_key_info_create(mlxsw_afk, elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_get); + +void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) +{ + if (--key_info->ref_count) + return; + mlxsw_afk_key_info_destroy(key_info); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_put); + +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage) +{ + return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage); +} +EXPORT_SYMBOL(mlxsw_afk_key_info_subset); + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block, + enum mlxsw_afk_element element) +{ + int i; + + for (i = 0; i < block->instances_count; i++) { + struct mlxsw_afk_element_inst *elinst; + + elinst = &block->instances[i]; + if (elinst->info->element == element) + return elinst; + } + return NULL; +} + +static const struct mlxsw_afk_element_inst * +mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info, + enum mlxsw_afk_element element, + int *p_block_index) +{ + const struct mlxsw_afk_element_inst *elinst; + const struct mlxsw_afk_block *block; + int block_index; + + if (WARN_ON(!test_bit(element, key_info->elusage.usage))) + return NULL; + block_index = key_info->element_to_block[element]; + block = key_info->blocks[block_index]; + + elinst = mlxsw_afk_block_elinst_get(block, element); + if (WARN_ON(!elinst)) + return NULL; + + *p_block_index = block_index; + return elinst; +} + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index) +{ + return key_info->blocks[block_index]->encoding; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get); + +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info) +{ + return key_info->blocks_count; +} +EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get); + +void 
mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!mask_value) + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32)) + return; + __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value); + __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_u32); + +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len) +{ + const struct mlxsw_afk_element_info *elinfo = + &mlxsw_afk_element_infos[element]; + const struct mlxsw_item *storage_item = &elinfo->item; + + if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */ + return; + if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) || + WARN_ON(elinfo->item.size.bytes != len)) + return; + __mlxsw_item_memcpy_to(values->storage.key, key_value, + storage_item, 0); + __mlxsw_item_memcpy_to(values->storage.mask, mask_value, + storage_item, 0); + mlxsw_afk_element_usage_add(&values->elusage, element); +} +EXPORT_SYMBOL(mlxsw_afk_values_add_buf); + +static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + u32 value; + + value = __mlxsw_item_get32(storage, storage_item, 0); + __mlxsw_item_set32(output_indexed, output_item, 0, value); +} + +static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output_indexed) +{ + char *storage_data = __mlxsw_item_data(storage, storage_item, 0); + char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + size_t len = output_item->size.bytes; + + memcpy(output_data, storage_data, len); +} + +#define MLXSW_AFK_KEY_BLOCK_SIZE 16 + +static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + int block_index, char *storage, char *output) +{ + char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; + const struct mlxsw_item *storage_item = &elinst->info->item; + const struct mlxsw_item *output_item = &elinst->item; + + if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) + mlxsw_afk_encode_u32(storage_item, output_item, + storage, output_indexed); + else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) + mlxsw_afk_encode_buf(storage_item, output_item, + storage, output_indexed); +} + +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask) +{ + const struct mlxsw_afk_element_inst *elinst; + enum mlxsw_afk_element element; + int block_index; + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, element, + &block_index); + if (!elinst) + continue; + mlxsw_afk_encode_one(elinst, block_index, + values->storage.key, key); + mlxsw_afk_encode_one(elinst, block_index, + values->storage.mask, mask); + } +} +EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h new file mode 100644 index 000000000000..e4fcba7c2af2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ 
-0,0 +1,238 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
+#define _MLXSW_CORE_ACL_FLEX_KEYS_H
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "item.h"
+
+enum mlxsw_afk_element {
+	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+	MLXSW_AFK_ELEMENT_DMAC,
+	MLXSW_AFK_ELEMENT_SMAC,
+	MLXSW_AFK_ELEMENT_ETHERTYPE,
+	MLXSW_AFK_ELEMENT_IP_PROTO,
+	MLXSW_AFK_ELEMENT_SRC_IP4,
+	MLXSW_AFK_ELEMENT_DST_IP4,
+	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+	MLXSW_AFK_ELEMENT_DST_IP6_HI,
+	MLXSW_AFK_ELEMENT_DST_IP6_LO,
+	MLXSW_AFK_ELEMENT_DST_L4_PORT,
+	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+	MLXSW_AFK_ELEMENT_MAX,
+};
+
+enum mlxsw_afk_element_type {
+	MLXSW_AFK_ELEMENT_TYPE_U32,
+	MLXSW_AFK_ELEMENT_TYPE_BUF,
+};
+
+struct mlxsw_afk_element_info {
+	enum mlxsw_afk_element element; /* element ID */
+	enum mlxsw_afk_element_type type;
+	struct mlxsw_item item; /* element geometry in internal storage */
+};
+
+#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size)	\
+	[MLXSW_AFK_ELEMENT_##_element] = {				\
+		.element = MLXSW_AFK_ELEMENT_##_element,		\
+		.type = _type,						\
+		.item = {						\
+			.offset = _offset,				\
+			.shift = _shift,				\
+			.size = {.bits = _size},			\
+			.name = #_element,				\
+		},							\
+	}
+
+#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size)	\
+	MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32,		\
+			       _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size)		\
+	MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF,		\
+			       _element, _offset, 0, _size)
+
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values.
For each defined element type + * define an internal storage geometry. + */ +static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), +}; + +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 + +struct mlxsw_afk_element_inst { /* element instance in actual block */ + const struct mlxsw_afk_element_info *info; + enum mlxsw_afk_element_type type; + struct mlxsw_item item; /* element geometry in block */ +}; + +#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \ + { \ + .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \ + .type = _type, \ + .item = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _size}, \ + .name = #_element, \ + }, \ + } + +#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \ + _element, _offset, _shift, _size) + +#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \ + MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \ + _element, _offset, 0, _size) + +struct mlxsw_afk_block { + u16 encoding; /* block ID */ + struct mlxsw_afk_element_inst *instances; + unsigned int instances_count; +}; + +#define MLXSW_AFK_BLOCK(_encoding, _instances) \ + { \ + .encoding = _encoding, \ + .instances = _instances, \ + .instances_count = ARRAY_SIZE(_instances), \ + } + +struct mlxsw_afk_element_usage { + DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); +}; + +#define mlxsw_afk_element_usage_for_each(element, elusage) \ + for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX) + +static inline void +mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage, + enum mlxsw_afk_element element) +{ + __set_bit(element, elusage->usage); +} + +static inline void +mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage) +{ + bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX); +} + +static inline void +mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage, + const enum mlxsw_afk_element *elements, + unsigned int elements_count) +{ + int i; + + mlxsw_afk_element_usage_zero(elusage); + for (i = 0; i < elements_count; i++) + mlxsw_afk_element_usage_add(elusage, elements[i]); +} + +static inline bool +mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, + struct mlxsw_afk_element_usage *elusage_big) +{ + int i; + + for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++) + if (test_bit(i, elusage_small->usage) && + !test_bit(i, elusage_big->usage)) + return false; + return true; +} + +struct mlxsw_afk; + +struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, + const struct mlxsw_afk_block *blocks, + unsigned int blocks_count); +void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); + +struct mlxsw_afk_key_info; + +struct mlxsw_afk_key_info * +mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_element_usage *elusage); 
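+
+/* key_info objects are reference counted: _get() reuses an existing
+ * key_info that already covers the requested element usage and only
+ * builds a new one otherwise; _put() releases the reference. A minimal
+ * usage sketch (the surrounding driver code is omitted):
+ *
+ *	struct mlxsw_afk_element_usage elusage;
+ *	struct mlxsw_afk_key_info *key_info;
+ *
+ *	mlxsw_afk_element_usage_zero(&elusage);
+ *	mlxsw_afk_element_usage_add(&elusage, MLXSW_AFK_ELEMENT_DMAC);
+ *	key_info = mlxsw_afk_key_info_get(mlxsw_afk, &elusage);
+ *	if (IS_ERR(key_info))
+ *		return PTR_ERR(key_info);
+ *	...
+ *	mlxsw_afk_key_info_put(key_info);
+ */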
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info); +bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_usage *elusage); + +u16 +mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info, + int block_index); +unsigned int +mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info); + +struct mlxsw_afk_element_values { + struct mlxsw_afk_element_usage elusage; + struct { + char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE]; + } storage; +}; + +void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, + enum mlxsw_afk_element element, + const char *key_value, const char *mask_value, + unsigned int len); +void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, + struct mlxsw_afk_element_values *values, + char *key, char *mask); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index e50c8db2602a..12c3a4449120 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -338,7 +338,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num, return -EIO; } - return err > 0 ? 0 : err; + return 0; } /* Routine executes I2C command. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 3c95e3ddd9c2..28427f0758c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/item.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -72,6 +72,40 @@ __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index, typesize); } +static inline u8 __mlxsw_item_get8(const char *buf, + const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8)); + u8 *b = (u8 *) buf; + u8 tmp; + + tmp = b[offset]; + tmp >>= item->shift; + tmp &= GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item, + unsigned short index, u8 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u8)); + u8 *b = (u8 *) buf; + u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u8 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = b[offset]; + tmp &= ~mask; + tmp |= val; + b[offset] = tmp; +} + static inline u16 __mlxsw_item_get16(const char *buf, const struct mlxsw_item *item, unsigned short index) @@ -191,6 +225,14 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, memcpy(&buf[offset], src, item->size.bytes); } +static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + return &buf[offset]; +} + static inline u16 __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, u16 index, u8 *shift) @@ -253,6 +295,47 @@ static inline void __mlxsw_item_bit_array_set(char *buf, * _iname: item name within the container */ +#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ +{ \ + return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u8 val) \ +{ \ + __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + #define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .offset = _offset, \ @@ -393,6 +476,11 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline char * \ 
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \ +{ \ + return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ @@ -419,6 +507,12 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ { \ __mlxsw_item_memcpy_to(buf, src, \ &__ITEM_NAME(_type, _cname, _iname), index); \ +} \ +static inline char * \ +mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_data(buf, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1357fe04391b..b50a312d89c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,9 +1,9 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> - * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -1757,6 +1757,505 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* PPBT - Policy-Engine Port Binding Table + * --------------------------------------- + * This register is used for configuration of the Port Binding Table. + */ +#define MLXSW_REG_PPBT_ID 0x3002 +#define MLXSW_REG_PPBT_LEN 0x14 + +MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN); + +enum mlxsw_reg_pxbt_e { + MLXSW_REG_PXBT_E_IACL, + MLXSW_REG_PXBT_E_EACL, +}; + +/* reg_ppbt_e + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1); + +enum mlxsw_reg_pxbt_op { + MLXSW_REG_PXBT_OP_BIND, + MLXSW_REG_PXBT_OP_UNBIND, +}; + +/* reg_ppbt_op + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3); + +/* reg_ppbt_local_port + * Local port. Not including CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8); + +/* reg_ppbt_g + * group - When set, the binding is of an ACL group. When cleared, + * the binding is of an ACL. + * Must be set to 1 for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1); + +/* reg_ppbt_acl_info + * ACL/ACL group identifier. If the g bit is set, this field should hold + * the acl_group_id, else it should hold the acl_id. + * Access: RW + */ +MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16); + +static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e, + enum mlxsw_reg_pxbt_op op, + u8 local_port, u16 acl_info) +{ + MLXSW_REG_ZERO(ppbt, payload); + mlxsw_reg_ppbt_e_set(payload, e); + mlxsw_reg_ppbt_op_set(payload, op); + mlxsw_reg_ppbt_local_port_set(payload, local_port); + mlxsw_reg_ppbt_g_set(payload, true); + mlxsw_reg_ppbt_acl_info_set(payload, acl_info); +} + +/* PACL - Policy-Engine ACL Register + * --------------------------------- + * This register is used for configuration of the ACL. + */ +#define MLXSW_REG_PACL_ID 0x3004 +#define MLXSW_REG_PACL_LEN 0x70 + +MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN); + +/* reg_pacl_v + * Valid. Setting the v bit makes the ACL valid. 
It should not be cleared
+ * while the ACL is bound to either a port, VLAN or ACL rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1);
+
+/* reg_pacl_acl_id
+ * An identifier representing the ACL (managed by software).
+ * Range 0 .. cap_max_acl_regions - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16);
+
+#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16
+
+/* reg_pacl_tcam_region_info
+ * Opaque object that represents a TCAM region.
+ * Obtained through PTAR register.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30,
+	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id,
+				       bool valid, const char *tcam_region_info)
+{
+	MLXSW_REG_ZERO(pacl, payload);
+	mlxsw_reg_pacl_acl_id_set(payload, acl_id);
+	mlxsw_reg_pacl_v_set(payload, valid);
+	mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+/* PAGT - Policy-Engine ACL Group Table
+ * ------------------------------------
+ * This register is used for configuration of the ACL Group Table.
+ */
+#define MLXSW_REG_PAGT_ID 0x3005
+#define MLXSW_REG_PAGT_BASE_LEN 0x30
+#define MLXSW_REG_PAGT_ACL_LEN 4
+#define MLXSW_REG_PAGT_ACL_MAX_NUM 16
+#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \
+		MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN)
+
+MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN);
+
+/* reg_pagt_size
+ * Number of ACLs in the group.
+ * Size 0 invalidates a group.
+ * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now)
+ * Total number of ACLs in all groups must be less than or equal
+ * to cap_max_acl_tot_groups
+ * Note: a group which is bound must not be invalidated
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8);
+
+/* reg_pagt_acl_group_id
+ * An identifier (numbered from 0..cap_max_acl_groups-1) representing
+ * the ACL Group identifier (managed by software).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16);
+
+/* reg_pagt_acl_id
+ * ACL identifier
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id)
+{
+	MLXSW_REG_ZERO(pagt, payload);
+	mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id);
+}
+
+static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index,
+					      u16 acl_id)
+{
+	u8 size = mlxsw_reg_pagt_size_get(payload);
+
+	if (index >= size)
+		mlxsw_reg_pagt_size_set(payload, index + 1);
+	mlxsw_reg_pagt_acl_id_set(payload, index, acl_id);
+}
+
+/* PTAR - Policy-Engine TCAM Allocation Register
+ * ---------------------------------------------
+ * This register is used for allocation of regions in the TCAM.
+ * Note: Query method is not supported on this register.
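+ *
+ * A typical flow (a sketch pieced together from the helpers below):
+ * pack with MLXSW_REG_PTAR_OP_ALLOC, the requested region size and the
+ * flexible key ids, then read the opaque tcam_region_info back with
+ * mlxsw_reg_ptar_unpack() and hand it to the other policy-engine
+ * registers (e.g. PACL, PTCE2) and to the eventual OP_FREE call.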
+ */
+#define MLXSW_REG_PTAR_ID 0x3006
+#define MLXSW_REG_PTAR_BASE_LEN 0x20
+#define MLXSW_REG_PTAR_KEY_ID_LEN 1
+#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16
+#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \
+		MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN)
+
+MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN);
+
+enum mlxsw_reg_ptar_op {
+	/* allocate a TCAM region */
+	MLXSW_REG_PTAR_OP_ALLOC,
+	/* resize a TCAM region */
+	MLXSW_REG_PTAR_OP_RESIZE,
+	/* deallocate a TCAM region */
+	MLXSW_REG_PTAR_OP_FREE,
+	/* test allocation */
+	MLXSW_REG_PTAR_OP_TEST,
+};
+
+/* reg_ptar_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
+
+/* reg_ptar_action_set_type
+ * Type of action set to be used on this region.
+ * For Spectrum, this is always type 2 - "flexible"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+
+/* reg_ptar_key_type
+ * TCAM key type for the region.
+ * For Spectrum, this is always type 0x50 - "FLEX_KEY"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
+
+/* reg_ptar_region_size
+ * TCAM region size. When allocating/resizing this is the requested size,
+ * the response is the actual size. Note that the actual size may be
+ * larger than requested.
+ * Allowed range 1 .. cap_max_rules-1
+ * Reserved during op deallocate.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16);
+
+/* reg_ptar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16);
+
+/* reg_ptar_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Returned when allocating a region.
+ * Provided by software for ACL generation and region deallocation and resize.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10,
+	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptar_flexible_key_id
+ * Identifier of the Flexible Key.
+ * Only valid if key_type == "FLEX_KEY"
+ * The key size will be rounded up to one of the following values:
+ * 9B, 18B, 36B, 54B.
+ * This field is reserved during the resize operation.
+ * Access: WO
+ */
+MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
+		    MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
+
+static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+				       u16 region_size, u16 region_id,
+				       const char *tcam_region_info)
+{
+	MLXSW_REG_ZERO(ptar, payload);
+	mlxsw_reg_ptar_op_set(payload, op);
+	mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
+	mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+	mlxsw_reg_ptar_region_size_set(payload, region_size);
+	mlxsw_reg_ptar_region_id_set(payload, region_id);
+	mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index,
+					      u16 key_id)
+{
+	mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id);
+}
+
+static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
+{
+	mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
+}
+
+/* PPBS - Policy-Engine Policy Based Switching Register
+ * ----------------------------------------------------
+ * This register retrieves and sets Policy Based Switching Table entries.
+ */
+#define MLXSW_REG_PPBS_ID 0x300C
+#define MLXSW_REG_PPBS_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN);
+
+/* reg_ppbs_pbs_ptr
+ * Index into the PBS table.
+ * For Spectrum, the index points to the KVD Linear.
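+ * (In this patchset, this is the kvdl_index that
+ * mlxsw_afa_block_append_fwd() encodes into the forward action's
+ * pbs_ptr field.)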
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24);
+
+/* reg_ppbs_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr,
+				       u16 system_port)
+{
+	MLXSW_REG_ZERO(ppbs, payload);
+	mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr);
+	mlxsw_reg_ppbs_system_port_set(payload, system_port);
+}
+
+/* PRCR - Policy-Engine Rules Copy Register
+ * ----------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ */
+#define MLXSW_REG_PRCR_ID 0x300D
+#define MLXSW_REG_PRCR_LEN 0x40
+
+MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN);
+
+enum mlxsw_reg_prcr_op {
+	/* Move rules. Moves the rules from "tcam_region_info" starting
+	 * at offset "offset" to "dest_tcam_region_info"
+	 * at offset "dest_offset."
+	 */
+	MLXSW_REG_PRCR_OP_MOVE,
+	/* Copy rules. Copies the rules from "tcam_region_info" starting
+	 * at offset "offset" to "dest_tcam_region_info"
+	 * at offset "dest_offset."
+	 */
+	MLXSW_REG_PRCR_OP_COPY,
+};
+
+/* reg_prcr_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4);
+
+/* reg_prcr_offset
+ * Offset within the source region to copy/move from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16);
+
+/* reg_prcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16);
+
+/* reg_prcr_tcam_region_info
+ * Opaque object that represents the source TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10,
+	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_prcr_dest_offset
+ * Offset within the destination region to copy/move to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16);
+
+/* reg_prcr_dest_tcam_region_info
+ * Opaque object that represents the destination TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30,
+	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op,
+				       const char *src_tcam_region_info,
+				       u16 src_offset,
+				       const char *dest_tcam_region_info,
+				       u16 dest_offset, u16 size)
+{
+	MLXSW_REG_ZERO(prcr, payload);
+	mlxsw_reg_prcr_op_set(payload, op);
+	mlxsw_reg_prcr_offset_set(payload, src_offset);
+	mlxsw_reg_prcr_size_set(payload, size);
+	mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload,
+						  src_tcam_region_info);
+	mlxsw_reg_prcr_dest_offset_set(payload, dest_offset);
+	mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload,
+						       dest_tcam_region_info);
+}
+
+/* PEFA - Policy-Engine Extended Flexible Action Register
+ * ------------------------------------------------------
+ * This register is used for accessing an extended flexible action entry
+ * in the central KVD Linear Database.
+ */
+#define MLXSW_REG_PEFA_ID 0x300F
+#define MLXSW_REG_PEFA_LEN 0xB0
+
+MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
+
+/* reg_pefa_index
+ * Index in the KVD Linear Centralized Database.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+
+#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8
+
+/* reg_pefa_flex_action_set
+ * Action-set to perform when a rule is matched.
+ * Must be zero padded if the action set is shorter.
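+ * (i.e. shorter than the MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN buffer).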
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08,
+	       MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+				       const char *flex_action_set)
+{
+	MLXSW_REG_ZERO(pefa, payload);
+	mlxsw_reg_pefa_index_set(payload, index);
+	mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+}
+
+/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
+ * -----------------------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ * It is a new version of PTCE in order to support wider key,
+ * mask and action within a TCAM region. This register is not supported
+ * by SwitchX and SwitchX-2.
+ */
+#define MLXSW_REG_PTCE2_ID 0x3017
+#define MLXSW_REG_PTCE2_LEN 0x1D8
+
+MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN);
+
+/* reg_ptce2_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1);
+
+/* reg_ptce2_a
+ * Activity. Set if a packet lookup has hit on the specific entry.
+ * To clear the "a" bit, use "clear activity" op or "clear on read" op.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1);
+
+enum mlxsw_reg_ptce2_op {
+	/* Read operation. */
+	MLXSW_REG_PTCE2_OP_QUERY_READ = 0,
+	/* clear on read operation. Used to read entry
+	 * and clear Activity bit.
+	 */
+	MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1,
+	/* Write operation. Used to write a new entry to the table.
+	 * All R/W fields are relevant for new entry. Activity bit is set
+	 * for new entries. Note: a write with v = 0 will delete the entry.
+	 */
+	MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0,
+	/* Update action. Only action set will be updated. */
+	MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1,
+	/* Clear activity. A bit is cleared for the entry. */
+	MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2,
+};
+
+/* reg_ptce2_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
+
+/* reg_ptce2_offset
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+
+/* reg_ptce2_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
+	       MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+
+/* reg_ptce2_flex_key_blocks
+ * ACL Key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
+	       MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_mask
+ * Mask - same size as the key. A bit that is set directs the TCAM
+ * to compare the corresponding bit in the key. A bit that is clear
+ * directs the TCAM to ignore the corresponding bit in the key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
+	       MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_flex_action_set
+ * ACL action set.
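+ * Uses the same MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN layout as
+ * reg_pefa_flex_action_set above.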
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, + MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + +static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, + enum mlxsw_reg_ptce2_op op, + const char *tcam_region_info, + u16 offset) +{ + MLXSW_REG_ZERO(ptce2, payload); + mlxsw_reg_ptce2_v_set(payload, valid); + mlxsw_reg_ptce2_op_set(payload, op); + mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -4965,6 +5464,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, MLXSW_REG_MLCR_DURATION_MAX : 0); } +/* MPSC - Monitoring Packet Sampling Configuration Register + * -------------------------------------------------------- + * MPSC Register is used to configure the Packet Sampling mechanism. + */ +#define MLXSW_REG_MPSC_ID 0x9080 +#define MLXSW_REG_MPSC_LEN 0x1C + +MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); + +/* reg_mpsc_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); + +/* reg_mpsc_e + * Enable sampling on port local_port + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); + +#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL + +/* reg_mpsc_rate + * Sampling rate = 1 out of rate packets (with randomization around + * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX + * Access: RW + */ +MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); + +static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, + u32 rate) +{ + MLXSW_REG_ZERO(mpsc, payload); + mlxsw_reg_mpsc_local_port_set(payload, local_port); + mlxsw_reg_mpsc_e_set(payload, e); + mlxsw_reg_mpsc_rate_set(payload, rate); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -5394,6 +5933,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(ppbt), + MLXSW_REG(pacl), + MLXSW_REG(pagt), + MLXSW_REG(ptar), + MLXSW_REG(ppbs), + MLXSW_REG(prcr), + MLXSW_REG(pefa), + MLXSW_REG(ptce2), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), @@ -5429,6 +5976,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpat), MLXSW_REG(mpar), MLXSW_REG(mlcr), + MLXSW_REG(mpsc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 3c2171dbdba4..bce8c2e00630 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/resources.h - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -48,6 +48,14 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, + MLXSW_RES_ID_ACL_MAX_TCAM_RULES, + MLXSW_RES_ID_ACL_MAX_REGIONS, + MLXSW_RES_ID_ACL_MAX_GROUPS, + MLXSW_RES_ID_ACL_MAX_GROUP_SIZE, + MLXSW_RES_ID_ACL_FLEX_KEYS, + MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, + MLXSW_RES_ID_ACL_ACTIONS_PER_SET, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -72,6 +80,14 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, + [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, + [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, + [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904, + [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905, + [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, + [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, + [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 003093abb170..8a52c860794b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.c - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> * @@ -57,6 +57,7 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> #include <net/netevent.h> +#include <net/tc_act/tc_sample.h> #include "spectrum.h" #include "pci.h" @@ -137,8 +138,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); -static bool mlxsw_sp_port_dev_check(const struct net_device *dev); - static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -469,6 +468,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); } +static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable, u32 rate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mpsc_pl[MLXSW_REG_MPSC_LEN]; + + mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -948,15 +957,13 @@ out: /* Return the stats from a cache that is updated periodically, * as this function might get called in an atomic context. 
*/ -static struct rtnl_link_stats64 * +static void mlxsw_sp_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); - - return stats; } int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, @@ -1164,8 +1171,8 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, } static struct mlxsw_sp_port_mall_tc_entry * -mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, - unsigned long cookie) { +mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) @@ -1177,17 +1184,15 @@ mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, static int mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror, const struct tc_action *a, bool ingress) { - struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; struct net *net = dev_net(mlxsw_sp_port->dev); enum mlxsw_sp_span_type span_type; struct mlxsw_sp_port *to_port; struct net_device *to_dev; int ifindex; - int err; ifindex = tcf_mirred_ifindex(a); to_dev = __dev_get_by_index(net, ifindex); @@ -1198,90 +1203,149 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, if (!mlxsw_sp_port_dev_check(to_dev)) { netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); - return -ENOTSUPP; + return -EOPNOTSUPP; } to_port = netdev_priv(to_dev); - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; + mirror->to_local_port = to_port->local_port; + mirror->ingress = ingress; + span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); +} - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; - mall_tc_entry->mirror.to_local_port = to_port->local_port; - mall_tc_entry->mirror.ingress = ingress; - list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); +static void +mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; - span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + to_port = mlxsw_sp->ports[mirror->to_local_port]; + span_type = mirror->ingress ? 
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); +} + +static int +mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + int err; + + if (!mlxsw_sp_port->sample) + return -EOPNOTSUPP; + if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) { + netdev_err(mlxsw_sp_port->dev, "sample already active\n"); + return -EEXIST; + } + if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) { + netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n"); + return -EOPNOTSUPP; + } + + rcu_assign_pointer(mlxsw_sp_port->sample->psample_group, + tcf_sample_psample_group(a)); + mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a); + mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a); + mlxsw_sp_port->sample->rate = tcf_sample_rate(a); + + err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a)); if (err) - goto err_mirror_add; + goto err_port_sample_set; return 0; -err_mirror_add: - list_del(&mall_tc_entry->list); - kfree(mall_tc_entry); +err_port_sample_set: + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); return err; } +static void +mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!mlxsw_sp_port->sample) + return; + + mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1); + RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL); +} + static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, __be16 protocol, struct tc_cls_matchall_offload *cls, bool ingress) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; const struct tc_action *a; LIST_HEAD(actions); int err; if (!tc_single_action(cls->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } - tcf_exts_to_list(cls->exts, &actions); - list_for_each_entry(a, &actions, list) { - if (!is_tcf_mirred_egress_mirror(a) || - protocol != htons(ETH_P_ALL)) { - return -ENOTSUPP; - } + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + mall_tc_entry->cookie = cls->cookie; - err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, + tcf_exts_to_list(cls->exts, &actions); + a = list_first_entry(&actions, struct tc_action, list); + + if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { + struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; + + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, + mirror, a, ingress); + } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { + mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, a, ingress); - if (err) - return err; + } else { + err = -EOPNOTSUPP; } + if (err) + goto err_add_action; + + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); return 0; + +err_add_action: + kfree(mall_tc_entry); + return err; } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *cls) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; - enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, - cls->cookie); + mall_tc_entry = 
mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, + cls->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; } + list_del(&mall_tc_entry->list); switch (mall_tc_entry->type) { case MLXSW_SP_PORT_MALL_MIRROR: - to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; - span_type = mall_tc_entry->mirror.ingress ? - MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port, + &mall_tc_entry->mirror); + break; + case MLXSW_SP_PORT_MALL_SAMPLE: + mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port); break; default: WARN_ON(1); } - list_del(&mall_tc_entry->list); kfree(mall_tc_entry); } @@ -1291,7 +1355,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); - if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->type) { + case TC_SETUP_MATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, @@ -1305,9 +1370,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, default: return -EINVAL; } + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, + proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, + tc->cls_flower); + return 0; + default: + return -EOPNOTSUPP; + } } - return -ENOTSUPP; + return -EOPNOTSUPP; } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -1650,7 +1727,7 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, break; default: WARN_ON(1); - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; } @@ -2256,6 +2333,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_alloc_stats; } + mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample), + GFP_KERNEL); + if (!mlxsw_sp_port->sample) { + err = -ENOMEM; + goto err_alloc_sample; + } + mlxsw_sp_port->hw_stats.cache = kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); @@ -2384,6 +2468,8 @@ err_dev_addr_init: err_port_swid_set: kfree(mlxsw_sp_port->hw_stats.cache); err_alloc_hw_stats: + kfree(mlxsw_sp_port->sample); +err_alloc_sample: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -2429,8 +2515,9 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); - free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->hw_stats.cache); + kfree(mlxsw_sp_port->sample); + free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); @@ -2731,6 +2818,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } +static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct psample_group *psample_group; + u32 size; + + if (unlikely(!mlxsw_sp_port)) { + 
dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n", + local_port); + goto out; + } + if (unlikely(!mlxsw_sp_port->sample)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n", + local_port); + goto out; + } + + size = mlxsw_sp_port->sample->truncate ? + mlxsw_sp_port->sample->trunc_size : skb->len; + + rcu_read_lock(); + psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group); + if (!psample_group) + goto out_unlock; + psample_sample_packet(psample_group, skb, size, + mlxsw_sp_port->dev->ifindex, 0, + mlxsw_sp_port->sample->rate); +out_unlock: + rcu_read_unlock(); +out: + consume_skb(skb); +} + #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) @@ -2766,6 +2888,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + /* PKT Sample trap */ + MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, + false, SP_IP2ME, DISCARD) }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3089,6 +3214,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_span_init; } + err = mlxsw_sp_acl_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); + goto err_acl_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -3098,6 +3229,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_acl_fini(mlxsw_sp); +err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: mlxsw_sp_router_fini(mlxsw_sp); @@ -3118,6 +3251,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3183,7 +3317,7 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; -static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index cc1af19d699a..4d251e06bf31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> * @@ -46,9 +46,13 @@ #include <linux/dcbnl.h> #include <linux/in6.h> #include <linux/notifier.h> +#include <net/psample.h> +#include <net/pkt_cls.h> #include "port.h" #include "core.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ @@ -229,6 +233,7 @@ struct mlxsw_sp_span_entry { enum mlxsw_sp_port_mall_action_type { MLXSW_SP_PORT_MALL_MIRROR, + MLXSW_SP_PORT_MALL_SAMPLE, }; struct mlxsw_sp_port_mall_mirror_tc_entry { @@ -260,6 +265,8 @@ struct mlxsw_sp_router { bool aborted; }; +struct mlxsw_sp_acl; + struct mlxsw_sp { struct { struct list_head list; @@ -289,6 +296,7 @@ struct mlxsw_sp { u8 port_to_module[MLXSW_PORT_MAX_PORTS]; struct mlxsw_sp_sb sb; struct mlxsw_sp_router router; + struct mlxsw_sp_acl *acl; struct { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; @@ -315,6 +323,13 @@ struct mlxsw_sp_port_pcpu_stats { u32 tx_dropped; }; +struct mlxsw_sp_port_sample { + struct psample_group __rcu *psample_group; + u32 trunc_size; + u32 rate; + bool truncate; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -361,8 +376,10 @@ struct mlxsw_sp_port { struct rtnl_link_stats64 *cache; struct delayed_work update_dw; } hw_stats; + struct mlxsw_sp_port_sample *sample; }; +bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); @@ -592,4 +609,99 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); + +struct mlxsw_sp_acl_rule_info { + unsigned int priority; + struct mlxsw_afk_element_values values; + struct mlxsw_afa_block *act_block; +}; + +enum mlxsw_sp_acl_profile { + MLXSW_SP_ACL_PROFILE_FLOWER, +}; + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct net_device *dev, bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + size_t rule_priv_size; + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); +}; + +struct mlxsw_sp_acl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + const struct mlxsw_sp_acl_profile_ops * + (*profile_ops)(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); +}; + +struct mlxsw_sp_acl_ruleset; + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile); +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset); + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); +void 
mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority); +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value); +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len); +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id); +int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev); + +struct mlxsw_sp_acl_rule; + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule); +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie); +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); + +extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; + +int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f); +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c new file mode 100644 index 000000000000..8a18b3aa70dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -0,0 +1,572 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" +#include "core_acl_flex_actions.h" +#include "spectrum_acl_flex_keys.h" + +struct mlxsw_sp_acl { + struct mlxsw_afk *afk; + struct mlxsw_afa *afa; + const struct mlxsw_sp_acl_ops *ops; + struct rhashtable ruleset_ht; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) +{ + return acl->afk; +} + +struct mlxsw_sp_acl_ruleset_ht_key { + struct net_device *dev; /* dev this ruleset is bound to */ + bool ingress; + const struct mlxsw_sp_acl_profile_ops *ops; +}; + +struct mlxsw_sp_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct rhashtable rule_ht; + unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_rule { + struct rhash_head ht_node; /* Member of rule HT */ + unsigned long cookie; /* HT key */ + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule_info *rulei; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node), + .automatic_shrinking = true, +}; + +static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie), + .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node), + .automatic_shrinking = true, +}; + +static struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_acl_profile_ops *ops) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset *ruleset; + size_t alloc_size; + int err; + + alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size; + ruleset = kzalloc(alloc_size, GFP_KERNEL); + if (!ruleset) + return ERR_PTR(-ENOMEM); + ruleset->ref_count = 1; + ruleset->ht_key.ops = ops; + + err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_init; + + err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + if (err) + goto err_ops_ruleset_add; + + return ruleset; + +err_ops_ruleset_add: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + 
kfree(ruleset); + return ERR_PTR(err); +} + +static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + ops->ruleset_del(mlxsw_sp, ruleset->priv); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset); +} + +static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + struct net_device *dev, bool ingress) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + int err; + + ruleset->ht_key.dev = dev; + ruleset->ht_key.ingress = ingress; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + if (err) + return err; + err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + if (err) + goto err_ops_ruleset_bind; + return 0; + +err_ops_ruleset_bind: + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); + return err; +} + +static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + mlxsw_sp_acl_ruleset_ht_params); +} + +static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) +{ + ruleset->ref_count++; +} + +static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + if (--ruleset->ref_count) + return; + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, bool ingress, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + struct mlxsw_sp_acl_ruleset *ruleset; + int err; + + ops = acl->ops->profile_ops(mlxsw_sp, profile); + if (!ops) + return ERR_PTR(-EINVAL); + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.dev = dev; + ht_key.ingress = ingress; + ht_key.ops = ops; + ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + mlxsw_sp_acl_ruleset_ht_params); + if (ruleset) { + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + return ruleset; + } + ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); + if (IS_ERR(ruleset)) + return ruleset; + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); + if (err) + goto err_ruleset_bind; + return ruleset; + +err_ruleset_bind: + mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); + if (!rulei) + return NULL; + rulei->act_block = mlxsw_afa_block_create(acl->afa); + if (IS_ERR(rulei->act_block)) { + err = PTR_ERR(rulei->act_block); + goto err_afa_block_create; + } + return rulei; + +err_afa_block_create: + kfree(rulei); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) +{ + 
mlxsw_afa_block_destroy(rulei->act_block); + kfree(rulei); +} + +int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_commit(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, + unsigned int priority) +{ + rulei->priority = priority; +} + +void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + u32 key_value, u32 mask_value) +{ + mlxsw_afk_values_add_u32(&rulei->values, element, + key_value, mask_value); +} + +void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, + enum mlxsw_afk_element element, + const char *key_value, + const char *mask_value, unsigned int len) +{ + mlxsw_afk_values_add_buf(&rulei->values, element, + key_value, mask_value, len); +} + +void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) +{ + mlxsw_afa_block_continue(rulei->act_block); +} + +void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id) +{ + mlxsw_afa_block_jump(rulei->act_block, group_id); +} + +int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_append_drop(rulei->act_block); +} + +int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct net_device *out_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + bool in_port; + + if (out_dev) { + if (!mlxsw_sp_port_dev_check(out_dev)) + return -EINVAL; + mlxsw_sp_port = netdev_priv(out_dev); + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) + return -EINVAL; + local_port = mlxsw_sp_port->local_port; + in_port = false; + } else { + /* If out_dev is NULL, the caller wants to + * forward to the ingress port. + */ + local_port = 0; + in_port = true; + } + return mlxsw_afa_block_append_fwd(rulei->act_block, + local_port, in_port); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_rule *rule; + int err; + + mlxsw_sp_acl_ruleset_ref_inc(ruleset); + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + if (!rule) { + err = -ENOMEM; + goto err_alloc; + } + rule->cookie = cookie; + rule->ruleset = ruleset; + + rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rule->rulei)) { + err = PTR_ERR(rule->rulei); + goto err_rulei_create; + } + return rule; + +err_rulei_create: + kfree(rule); +err_alloc: + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + + mlxsw_sp_acl_rulei_destroy(rule->rulei); + kfree(rule); + mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); +} + +int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + int err; + + err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei); + if (err) + return err; + + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + ops->rule_del(mlxsw_sp, rule->priv); + return err; +} + 
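+/* Removal mirrors mlxsw_sp_acl_rule_add(): the rule is unhashed first + * and only then deleted from the hardware. + */ +void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, + 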
struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + mlxsw_sp_acl_rule_ht_params); + ops->rule_del(mlxsw_sp, rule->priv); +} + +struct mlxsw_sp_acl_rule * +mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + unsigned long cookie) +{ + return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, + mlxsw_sp_acl_rule_ht_params); +} + +struct mlxsw_sp_acl_rule_info * +mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) +{ + return rule->rulei; +} + +#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 + +static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + u32 kvdl_index; + int ret; + int err; + + /* The first action set of a TCAM entry is stored directly in TCAM, + * not in the KVD linear area. + */ + if (is_first) + return 0; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_pefa_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, + bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + if (is_first) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, + u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char ppbs_pl[MLXSW_REG_PPBS_LEN]; + u32 kvdl_index; + int ret; + int err; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); + if (ret < 0) + return ret; + kvdl_index = ret; + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); + if (err) + goto err_ppbs_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_ppbs_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, +}; + +int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; + struct mlxsw_sp_acl *acl; + int err; + + acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + if (!acl) + return -ENOMEM; + mlxsw_sp->acl = acl; + + acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_FLEX_KEYS), + mlxsw_sp_afk_blocks, + MLXSW_SP_AFK_BLOCKS_COUNT); + if (!acl->afk) { + err = -ENOMEM; + goto err_afk_create; + } + + acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_ACTIONS_PER_SET), + &mlxsw_sp_act_afa_ops, mlxsw_sp); + if (IS_ERR(acl->afa)) { + err = PTR_ERR(acl->afa); + goto err_afa_create; + } + + err = rhashtable_init(&acl->ruleset_ht, + &mlxsw_sp_acl_ruleset_ht_params); + if (err) + goto err_rhashtable_init; + + err = acl_ops->init(mlxsw_sp, acl->priv); + if (err) + goto err_acl_ops_init; + 
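+ /* Publish the backend ops only once its init has succeeded. */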
+ acl->ops = acl_ops; + return 0; + +err_acl_ops_init: + rhashtable_destroy(&acl->ruleset_ht); +err_rhashtable_init: + mlxsw_afa_destroy(acl->afa); +err_afa_create: + mlxsw_afk_destroy(acl->afk); +err_afk_create: + kfree(acl); + return err; +} + +void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; + + acl_ops->fini(mlxsw_sp, acl->priv); + rhashtable_destroy(&acl->ruleset_ht); + mlxsw_afa_destroy(acl->afa); + mlxsw_afk_destroy(acl->afk); + kfree(acl); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h new file mode 100644 index 000000000000..82b81cf7f4a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -0,0 +1,109 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H + +#include "core_acl_flex_keys.h" + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { + MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), +}; + +static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), + MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), +}; + +#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c new file mode 100644 index 000000000000..a0a968e47ae6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -0,0 +1,1084 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> +#include <linux/parman.h> + +#include "reg.h" +#include "core.h" +#include "resources.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int max_groups; + unsigned int max_group_size; +}; + +static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + u64 max_tcam_regions; + u64 max_regions; + u64 max_groups; + size_t alloc_size; + int err; + + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_TCAM_REGIONS); + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + + /* Use 1:1 mapping between ACL region and TCAM region */ + if (max_tcam_regions < max_regions) + max_regions = max_tcam_regions; + + alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions); + tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_regions) + return -ENOMEM; + tcam->max_regions = max_regions; + + max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS); + alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups); + tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL); + if (!tcam->used_groups) { + err = -ENOMEM; + goto err_alloc_used_groups; + } + tcam->max_groups = max_groups; + tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_GROUP_SIZE); + return 0; + +err_alloc_used_groups: + kfree(tcam->used_regions); + return err; +} + +static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp 
*mlxsw_sp, void *priv) +{ + struct mlxsw_sp_acl_tcam *tcam = priv; + + kfree(tcam->used_groups); + kfree(tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_regions, tcam->max_regions); + if (id < tcam->max_regions) { + __set_bit(id, tcam->used_regions); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_regions); +} + +static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam, + u16 *p_id) +{ + u16 id; + + id = find_first_zero_bit(tcam->used_groups, tcam->max_groups); + if (id < tcam->max_groups) { + __set_bit(id, tcam->used_groups); + *p_id = id; + return 0; + } + return -ENOBUFS; +} + +static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam, + u16 id) +{ + __clear_bit(id, tcam->used_groups); +} + +struct mlxsw_sp_acl_tcam_pattern { + const enum mlxsw_afk_element *elements; + unsigned int elements_count; +}; + +struct mlxsw_sp_acl_tcam_group { + struct mlxsw_sp_acl_tcam *tcam; + u16 id; + struct list_head region_list; + unsigned int region_count; + struct rhashtable chunk_ht; + struct { + u16 local_port; + bool ingress; + } bound; + struct mlxsw_sp_acl_tcam_group_ops *ops; + const struct mlxsw_sp_acl_tcam_pattern *patterns; + unsigned int patterns_count; +}; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct parman *parman; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_acl_tcam_group *group; + u16 id; /* ACL ID and region ID - they are the same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct { + struct parman_prio parman_prio; + struct parman_item parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp_acl_tcam_chunk { + struct list_head list; /* Member of a TCAM region */ + struct rhash_head ht_node; /* Member of a chunk HT */ + unsigned int priority; /* Priority within the region and group */ + struct parman_prio parman_prio; + struct mlxsw_sp_acl_tcam_group *group; + struct mlxsw_sp_acl_tcam_region *region; + unsigned int ref_count; +}; + +struct mlxsw_sp_acl_tcam_entry { + struct parman_item parman_item; + struct mlxsw_sp_acl_tcam_chunk *chunk; +}; + +static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { + .key_len = sizeof(unsigned int), + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam_region *region; + char pagt_pl[MLXSW_REG_PAGT_LEN]; + int acl_index = 0; + + mlxsw_reg_pagt_pack(pagt_pl, group->id); + list_for_each_entry(region, &group->region_list, list) + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); + mlxsw_reg_pagt_size_set(pagt_pl, acl_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); +} + +static int +mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_sp_acl_tcam_group *group, + const struct mlxsw_sp_acl_tcam_pattern *patterns, + unsigned int patterns_count) +{ + int err; + + group->tcam = tcam; + group->patterns = 
patterns; + group->patterns_count = patterns_count; + INIT_LIST_HEAD(&group->region_list); + err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); + if (err) + return err; + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + + err = rhashtable_init(&group->chunk_ht, + &mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_init; + + return 0; + +err_rhashtable_init: +err_group_update: + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + return err; +} + +static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam *tcam = group->tcam; + + rhashtable_destroy(&group->chunk_ht); + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + WARN_ON(!list_empty(&group->region_list)); +} + +static int +mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + if (!mlxsw_sp_port_dev_check(dev)) + return -EINVAL; + + mlxsw_sp_port = netdev_priv(dev); + group->bound.local_port = mlxsw_sp_port->local_port; + group->bound.ingress = ingress; + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_BIND, group->bound.local_port, + group->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static void +mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group) +{ + char ppbt_pl[MLXSW_REG_PPBT_LEN]; + + mlxsw_reg_ppbt_pack(ppbt_pl, + group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port, + group->id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); +} + +static unsigned int +mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(&region->chunk_list)) + return 0; + /* As the priority of a region, return the priority of its first chunk */ + chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static unsigned int +mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + if (list_empty(&region->chunk_list)) + return 0; + chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list); + return chunk->priority; +} + +static void +mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_region *region2; + struct list_head *pos; + + /* Position the region inside the list according to priority */ + list_for_each(pos, &group->region_list) { + region2 = list_entry(pos, typeof(*region2), list); + if (mlxsw_sp_acl_tcam_region_prio(region2) > + mlxsw_sp_acl_tcam_region_prio(region)) + break; + } + list_add_tail(&region->list, pos); + group->region_count++; +} + +static void +mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + group->region_count--; + list_del(&region->list); +} + +static int +mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region) +{ + int err; + + if (group->region_count == group->tcam->max_group_size) + return -ENOBUFS; + + mlxsw_sp_acl_tcam_group_list_add(group, region); + + err = 
mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + region->group = group; + + return 0; + +err_group_update: + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + return err; +} + +static void +mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_tcam_group *group = region->group; + + mlxsw_sp_acl_tcam_group_list_del(group, region); + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); +} + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + bool *p_need_split) +{ + struct mlxsw_sp_acl_tcam_region *region, *region2; + struct list_head *pos; + bool issubset; + + list_for_each(pos, &group->region_list) { + region = list_entry(pos, typeof(*region), list); + + /* First, check whether the requested priority rather belongs + * under one of the following regions. + */ + if (pos->next != &group->region_list) { /* not last */ + region2 = list_entry(pos->next, typeof(*region2), list); + if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) + continue; + } + + issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); + + /* If the requested element usage would not fit and the priority + * is lower than the currently inspected region we cannot + * use this region, so return NULL to indicate a new region has + * to be created. + */ + if (!issubset && + priority < mlxsw_sp_acl_tcam_region_prio(region)) + return NULL; + + /* If the requested element usage would not fit and the priority + * is higher than the currently inspected region we cannot + * use this region. There is still hope that the next + * region might fit, so let it be processed; the loop will + * eventually break at the check right above this one. + */ + if (!issubset && + priority > mlxsw_sp_acl_tcam_region_max_prio(region)) + continue; + + /* Indicate if the region needs to be split in order to add + * the requested priority. Split is needed when the requested + * element usage won't fit into the found region. + */ + *p_need_split = !issubset; + return region; + } + return NULL; /* A new region has to be created. 
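+ * The caller then creates one, using the group's element patterns.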
*/ +} + +static void +mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_afk_element_usage *out) +{ + const struct mlxsw_sp_acl_tcam_pattern *pattern; + int i; + + for (i = 0; i < group->patterns_count; i++) { + pattern = &group->patterns[i]; + mlxsw_afk_element_usage_fill(out, pattern->elements, + pattern->elements_count); + if (mlxsw_afk_element_usage_subset(elusage, out)) + return; + } + memcpy(out, elusage, sizeof(*out)); +} + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct mlxsw_afk_key_info *key_info = region->key_info; + char ptar_pl[MLXSW_REG_PTAR_LEN]; + unsigned int encodings_count; + int i; + int err; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + region->id, region->tcam_region_info); + encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); + for (i = 0; i < encodings_count; i++) { + u16 encoding; + + encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i); + mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); + if (err) + return err; + mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info); + return 0; +} + +static void +mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + new_size, region->id, region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static int +mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, true, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static void +mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + char pacl_pl[MLXSW_REG_PACL_LEN]; + + mlxsw_reg_pacl_pack(pacl_pl, region->id, false, + region->tcam_region_info); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); +} + +static int +mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + struct mlxsw_sp_acl_rule_info *rulei) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + char *mask; + char *key; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void 
+mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, offset); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (-1UL) + +static int +mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = &region->catchall.parman_prio; + struct parman_item *parman_item = &region->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + parman_prio_init(region->parman, parman_prio, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + err = parman_item_add(region->parman, parman_prio, parman_item); + if (err) + goto err_parman_item_add; + + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + + mlxsw_sp_acl_rulei_act_continue(rulei); + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + parman_item->index, rulei); + region->catchall.rulei = rulei; + if (err) + goto err_rule_insert; + + return 0; + +err_rule_insert: +err_rulei_commit: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + parman_item_remove(region->parman, parman_prio, parman_item); +err_parman_item_add: + parman_prio_fini(parman_prio); + return err; +} + +static void +mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + struct parman_prio *parman_prio = &region->catchall.parman_prio; + struct parman_item *parman_item = &region->catchall.parman_item; + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + parman_item->index); + mlxsw_sp_acl_rulei_destroy(rulei); + parman_item_remove(region->parman, parman_prio, parman_item); + parman_prio_fini(parman_prio); +} + +static void +mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_tcam_region *region = priv; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_tcam_region_parman_resize, + .move = mlxsw_sp_acl_tcam_region_parman_move, + .algo = 
PARMAN_ALGO_TYPE_LSORT, +}; + +static struct mlxsw_sp_acl_tcam_region * +mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_tcam_region *region; + int err; + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&region->chunk_list); + region->mlxsw_sp = mlxsw_sp; + + region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, + region); + if (!region->parman) { + err = -ENOMEM; + goto err_parman_create; + } + + region->key_info = mlxsw_afk_key_info_get(afk, elusage); + if (IS_ERR(region->key_info)) { + err = PTR_ERR(region->key_info); + goto err_key_info_get; + } + + err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id); + if (err) + goto err_region_id_get; + + err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); + if (err) + goto err_tcam_region_alloc; + + err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region); + if (err) + goto err_tcam_region_enable; + + err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_tcam_region_catchall_add; + + return region; + +err_tcam_region_catchall_add: + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); +err_tcam_region_enable: + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); +err_tcam_region_alloc: + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); +err_region_id_get: + mlxsw_afk_key_info_put(region->key_info); +err_key_info_get: + parman_destroy(region->parman); +err_parman_create: + kfree(region); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); + mlxsw_afk_key_info_put(region->key_info); + parman_destroy(region->parman); + kfree(region); +} + +static int +mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region; + bool region_created = false; + bool need_split; + int err; + + region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, + &need_split); + if (region && need_split) { + /* According to priority, the chunk should belong to an + * existing region. However, this chunk needs elements + * that region does not contain. We need to split the existing + * region into two and create a new region for this chunk + * in between. This is not currently supported. 
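+ * Splitting would require moving existing entries between regions.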
+ */ + return -EOPNOTSUPP; + } + if (!region) { + struct mlxsw_afk_element_usage region_elusage; + + mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, + &region_elusage); + region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, + &region_elusage); + if (IS_ERR(region)) + return PTR_ERR(region); + region_created = true; + } + + chunk->region = region; + list_add_tail(&chunk->list, &region->chunk_list); + + if (!region_created) + return 0; + + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); + if (err) + goto err_group_region_attach; + + return 0; + +err_group_region_attach: + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + return err; +} + +static void +mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + list_del(&chunk->list); + if (list_empty(&region->chunk_list)) { + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + } +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + int err; + + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) + return ERR_PTR(-EINVAL); + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return ERR_PTR(-ENOMEM); + chunk->priority = priority; + chunk->group = group; + chunk->ref_count = 1; + + err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, + elusage, chunk); + if (err) + goto err_chunk_assoc; + + parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + + err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (err) + goto err_rhashtable_insert; + + return chunk; + +err_rhashtable_insert: + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); +err_chunk_assoc: + kfree(chunk); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + struct mlxsw_sp_acl_tcam_group *group = chunk->group; + + rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, + mlxsw_sp_acl_tcam_chunk_ht_params); + parman_prio_fini(&chunk->parman_prio); + mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); + kfree(chunk); +} + +static struct mlxsw_sp_acl_tcam_chunk * +mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + + chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, + mlxsw_sp_acl_tcam_chunk_ht_params); + if (chunk) { + if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, + elusage))) + return ERR_PTR(-EINVAL); + chunk->ref_count++; + return chunk; + } + return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, + priority, elusage); +} + +static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + if (--chunk->ref_count) + return; + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); +} + +static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_entry *entry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_region 
*region; + int err; + + chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, + &rulei->values.elusage); + if (IS_ERR(chunk)) + return PTR_ERR(chunk); + + region = chunk->region; + err = parman_item_add(region->parman, &chunk->parman_prio, + &entry->parman_item); + if (err) + goto err_parman_item_add; + + err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, + entry->parman_item.index, + rulei); + if (err) + goto err_rule_insert; + entry->chunk = chunk; + + return 0; + +err_rule_insert: + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); +err_parman_item_add: + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); + return err; +} + +static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, + entry->parman_item.index); + parman_item_remove(region->parman, &chunk->parman_prio, + &entry->parman_item); + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); +} + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { + MLXSW_AFK_ELEMENT_SRC_SYS_PORT, + MLXSW_AFK_ELEMENT_DMAC, + MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP4, + MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { + MLXSW_AFK_ELEMENT_ETHERTYPE, + MLXSW_AFK_ELEMENT_IP_PROTO, + MLXSW_AFK_ELEMENT_SRC_IP6_HI, + MLXSW_AFK_ELEMENT_SRC_IP6_LO, + MLXSW_AFK_ELEMENT_DST_IP6_HI, + MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_DST_L4_PORT, + MLXSW_AFK_ELEMENT_SRC_L4_PORT, +}; + +static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = { + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv4, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4), + }, + { + .elements = mlxsw_sp_acl_tcam_pattern_ipv6, + .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6), + }, +}; + +#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \ + ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns) + +struct mlxsw_sp_acl_tcam_flower_ruleset { + struct mlxsw_sp_acl_tcam_group group; +}; + +struct mlxsw_sp_acl_tcam_flower_rule { + struct mlxsw_sp_acl_tcam_entry entry; +}; + +static int +mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, + void *priv, void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam *tcam = priv; + + return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); +} + +static int +mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, + struct net_device *dev, bool ingress) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, + dev, ingress); +} + +static void +mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); +} + 
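+/* Each flower rule maps 1:1 onto a TCAM entry in the ruleset's group. */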
+static int +mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, + &rule->entry, rulei); +} + +static void +mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) +{ + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); +} + +static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { + .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), + .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, + .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, + .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, + .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, + .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops_arr[] = { + [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, +}; + +static const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + + if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr))) + return NULL; + ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile]; + if (WARN_ON(!ops)) + return NULL; + return ops; +} + +const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp_acl_tcam), + .init = mlxsw_sp_acl_tcam_init, + .fini = mlxsw_sp_acl_tcam_fini, + .profile_ops = mlxsw_sp_acl_tcam_profile_ops, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c new file mode 100644 index 000000000000..35b147a47eb5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -0,0 +1,309 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> + +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tcf_exts *exts) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (tc_no_actions(exts)) + return 0; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + err = mlxsw_sp_acl_rulei_act_drop(rulei); + if (err) + return err; + } else if (is_tcf_mirred_egress_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out_dev; + + out_dev = __dev_get_by_index(dev_net(dev), ifindex); + if (out_dev == dev) + out_dev = NULL; + + err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, + out_dev); + if (err) + return err; + } else { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); + return -EOPNOTSUPP; + } + } + return 0; +} + +static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, + ntohl(key->src), ntohl(mask->src)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, + ntohl(key->dst), ntohl(mask->dst)); +} + +static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + size_t addr_half_size = sizeof(key->src) / 2; + + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, + &key->src.s6_addr[0], + &mask->src.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, + &key->src.s6_addr[addr_half_size], + &mask->src.s6_addr[addr_half_size], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, + &key->dst.s6_addr[0], + &mask->dst.s6_addr[0], + addr_half_size); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, + &key->dst.s6_addr[addr_half_size], + &mask->dst.s6_addr[addr_half_size], + addr_half_size); +} + +static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f, + u8 ip_proto) +{ + struct flow_dissector_key_ports *key, *mask; + + if 
(!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) + return 0; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, + ntohs(key->dst), ntohs(mask->dst)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + ntohs(key->src), ntohs(mask->src)); + return 0; +} + +static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f) +{ + u16 addr_type = 0; + u8 ip_proto = 0; + int err; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); + return -EOPNOTSUPP; + } + + mlxsw_sp_acl_rulei_priority(rulei, f->prio); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + addr_type = key->addr_type; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_ETHERTYPE, + ntohs(key->n_proto), + ntohs(mask->n_proto)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_IP_PROTO, + key->ip_proto, mask->ip_proto); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_DMAC, + key->dst, mask->dst, + sizeof(key->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC, + key->src, mask->src, + sizeof(key->src)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + mlxsw_sp_flower_parse_ipv4(rulei, f); + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + mlxsw_sp_flower_parse_ipv6(rulei, f); + + err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto); + if (err) + return err; + + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); +} + +int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + __be16 protocol, struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_acl_rule_info *rulei; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + int err; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); + if 
(IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_rule_create; + } + + rulei = mlxsw_sp_acl_rule_rulei(rule); + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); + if (err) + goto err_flower_parse; + + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + + err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule); + if (err) + goto err_rule_add; + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return 0; + +err_rule_add: +err_rulei_commit: +err_flower_parse: + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); +err_rule_create: + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return err; +} + +void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, + ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (WARN_ON(IS_ERR(ruleset))) + return; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); + if (!WARN_ON(!rule)) { + mlxsw_sp_acl_rule_del(mlxsw_sp, rule); + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); + } + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2e88115e8735..169193ee3d4b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -382,7 +382,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) return 0; } -static struct rtnl_link_stats64 * +static void mlxsw_sx_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -411,7 +411,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev, tx_dropped += p->tx_dropped; } stats->tx_dropped = tx_dropped; - return stats; } static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7ab275deacac..02ea48b15eb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,7 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 20cb85bc0c5f..d2106159473f 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) /* Relinquish the SKB to the network layer */ skb_put(skb, pktlen); skb->protocol = eth_type_trans(skb, ndev); - netif_receive_skb(skb); + napi_gro_receive(&ksp->napi, skb); /* Record stats */ ndev->stats.rx_packets++; @@ -561,18 +561,17 @@ rx_finished: static int ks8695_poll(struct napi_struct *napi, int budget) { struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); - unsigned long work_done; - unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN); unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); + int work_done; work_done = ks8695_rx(ksp, budget); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; + spin_lock_irqsave(&ksp->rx_lock, flags); - __napi_complete(napi); - /*enable rx 
interrupt*/ + /* enable rx interrupt */ writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN); spin_unlock_irqrestore(&ksp->rx_lock, flags); } diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index e7e1aff40bd9..955d69a8e8d3 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -84,7 +84,6 @@ union ks8851_tx_hdr { * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. * @rc_rxqcr: Cached copy of KS_RXQCR. - * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. * @vdd_reg: Optional regulator supplying the chip * @vdd_io: Optional digital power supply for IO @@ -120,7 +119,6 @@ struct ks8851_net { u16 rc_ier; u16 rc_rxqcr; u16 rc_ccr; - u16 eeprom_size; struct mii_if_info mii; struct ks8851_rxctrl rxctrl; @@ -1533,11 +1531,6 @@ static int ks8851_probe(struct spi_device *spi) /* cache the contents of the CCR register for EEPROM, etc. */ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); - if (ks->rc_ccr & CCR_EEPROM) - ks->eeprom_size = 128; - else - ks->eeprom_size = 0; - ks8851_read_selftest(ks); ks8851_init_mac(ks); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 9774b50cff6e..06c9f4100cb9 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -269,7 +269,7 @@ rx_next: } if (rx < budget) { - napi_complete(napi); + napi_complete_done(napi, rx); } priv->reg_imr |= RPKT_FINISH_M; @@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev) spin_unlock_irq(&priv->txlock); } -static struct net_device_ops moxart_netdev_ops = { +static const struct net_device_ops moxart_netdev_ops = { .ndo_open = moxart_mac_open, .ndo_stop = moxart_mac_stop, .ndo_start_xmit = moxart_mac_start_xmit, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index e506ca876d0d..1139d1803e7e 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -191,21 +191,6 @@ struct myri10ge_slice_state { int cpu; __be32 __iomem *dca_tag; #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define SLICE_STATE_IDLE 0 -#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */ -#define SLICE_STATE_POLL 2 /* poll owns this slice */ -#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL) -#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */ -#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */ -#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD) - spinlock_t lock; - unsigned long lock_napi_yield; - unsigned long lock_poll_yield; - unsigned long busy_poll_miss; - unsigned long busy_poll_cnt; -#endif /* CONFIG_NET_RX_BUSY_POLL */ char irq_desc[32]; }; @@ -378,8 +363,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p) __raw_writel((__force __u32) val, (__force void __iomem *)p); } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated) { @@ -925,92 +910,6 @@ abort: return status; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ - spin_lock_init(&ss->lock); - ss->state = SLICE_STATE_IDLE; -} - -static 
inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - bool rc = true; - spin_lock(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state |= SLICE_STATE_NAPI_YIELD; - rc = false; - ss->lock_napi_yield++; - } else - ss->state = SLICE_STATE_NAPI; - spin_unlock(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ - spin_lock(&ss->lock); - WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); - ss->state = SLICE_STATE_IDLE; - spin_unlock(&ss->lock); -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - bool rc = true; - spin_lock_bh(&ss->lock); - if ((ss->state & SLICE_LOCKED)) { - ss->state |= SLICE_STATE_POLL_YIELD; - rc = false; - ss->lock_poll_yield++; - } else - ss->state |= SLICE_STATE_POLL; - spin_unlock_bh(&ss->lock); - return rc; -} - -static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ - spin_lock_bh(&ss->lock); - WARN_ON((ss->state & SLICE_STATE_NAPI)); - ss->state = SLICE_STATE_IDLE; - spin_unlock_bh(&ss->lock); -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - WARN_ON(!(ss->state & SLICE_LOCKED)); - return (ss->state & SLICE_USER_PEND); -} -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) -{ - return false; -} - -static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) -{ - return false; -} - -static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) -{ -} - -static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) -{ - return false; -} -#endif - static int myri10ge_reset(struct myri10ge_priv *mgp) { struct myri10ge_cmd cmd; @@ -1426,7 +1325,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) struct pci_dev *pdev = mgp->pdev; struct net_device *dev = mgp->dev; u8 *va; - bool polling; if (len <= mgp->small_bytes) { rx = &ss->rx_small; @@ -1441,15 +1339,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; prefetch(va); - /* When busy polling in user context, allocate skb and copy headers to - * skb's linear memory ourselves. When not busy polling, use the napi - * gro api. - */ - polling = myri10ge_ss_busy_polling(ss); - if (polling) - skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); - else - skb = napi_get_frags(&ss->napi); + skb = napi_get_frags(&ss->napi); if (unlikely(skb == NULL)) { ss->stats.rx_dropped++; for (i = 0, remainder = len; remainder > 0; i++) { @@ -1489,27 +1379,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - if (polling) { - int hlen; - - /* myri10ge_vlan_rx might have moved the header, so compute - * length and address again. - */ - hlen = MYRI10GE_HLEN > skb->len ? 
skb->len : MYRI10GE_HLEN; - va = page_address(skb_frag_page(&rx_frags[0])) + - rx_frags[0].page_offset; - /* Copy header into the skb linear memory */ - skb_copy_to_linear_data(skb, va, hlen); - rx_frags[0].page_offset += hlen; - rx_frags[0].size -= hlen; - skb->data_len -= hlen; - skb->tail += hlen; - skb->protocol = eth_type_trans(skb, dev); - skb_mark_napi_id(skb, &ss->napi); - netif_receive_skb(skb); - } - else - napi_gro_frags(&ss->napi); + napi_gro_frags(&ss->napi); return 1; } @@ -1669,49 +1539,16 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) if (ss->mgp->dca_enabled) myri10ge_update_dca(ss); #endif - /* Try later if the busy_poll handler is running. */ - if (!myri10ge_ss_lock_napi(ss)) - return budget; - /* process as many rx events as NAPI will allow */ work_done = myri10ge_clean_rx_done(ss, budget); - myri10ge_ss_unlock_napi(ss); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); put_be32(htonl(3), ss->irq_claim); } return work_done; } -#ifdef CONFIG_NET_RX_BUSY_POLL -static int myri10ge_busy_poll(struct napi_struct *napi) -{ - struct myri10ge_slice_state *ss = - container_of(napi, struct myri10ge_slice_state, napi); - struct myri10ge_priv *mgp = ss->mgp; - int work_done; - - /* Poll only when the link is up */ - if (mgp->link_state != MXGEFW_LINK_UP) - return LL_FLUSH_FAILED; - - if (!myri10ge_ss_lock_poll(ss)) - return LL_FLUSH_BUSY; - - /* Process a small number of packets */ - work_done = myri10ge_clean_rx_done(ss, 4); - if (work_done) - ss->busy_poll_cnt += work_done; - else - ss->busy_poll_miss++; - - myri10ge_ss_unlock_poll(ss); - - return work_done; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - static irqreturn_t myri10ge_intr(int irq, void *arg) { struct myri10ge_slice_state *ss = arg; @@ -1919,10 +1756,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", "wake_queue", "stop_queue", "tx_linearized", -#ifdef CONFIG_NET_RX_BUSY_POLL - "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss", - "rx_busy_poll_cnt", -#endif }; #define MYRI10GE_NET_STATS_LEN 21 @@ -2022,12 +1855,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ss->tx.wake_queue; data[i++] = (unsigned int)ss->tx.stop_queue; data[i++] = (unsigned int)ss->tx.linearized; -#ifdef CONFIG_NET_RX_BUSY_POLL - data[i++] = ss->lock_napi_yield; - data[i++] = ss->lock_poll_yield; - data[i++] = ss->busy_poll_miss; - data[i++] = ss->busy_poll_cnt; -#endif } } @@ -2589,9 +2416,6 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - /* Initialize the slice spinlock and state used for polling */ - myri10ge_ss_init_lock(ss); - /* must happen prior to any irq */ napi_enable(&(ss)->napi); } @@ -2668,19 +2492,9 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - for (i = 0; i < mgp->num_slices; i++) { + for (i = 0; i < mgp->num_slices; i++) napi_disable(&mgp->ss[i].napi); - local_bh_disable(); /* myri10ge_ss_lock_napi needs this */ - /* Lock the slice to prevent the busy_poll handler from - * accessing it. Later when we bring the NIC up, myri10ge_open - * resets the slice including this lock. 
- */ - while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { - pr_info("Slice %d locked\n", i); - mdelay(1); - } - local_bh_enable(); - } + netif_carrier_off(dev); netif_tx_stop_all_queues(dev); @@ -3119,8 +2933,8 @@ drop: return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { const struct myri10ge_priv *mgp = netdev_priv(dev); const struct myri10ge_slice_netstats *slice_stats; @@ -3135,7 +2949,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, stats->rx_dropped += slice_stats->rx_dropped; stats->tx_dropped += slice_stats->tx_dropped; } - return stats; } static void myri10ge_set_multicast_list(struct net_device *dev) @@ -3954,9 +3767,6 @@ static const struct net_device_ops myri10ge_netdev_ops = { .ndo_change_mtu = myri10ge_change_mtu, .ndo_set_rx_mode = myri10ge_set_multicast_list, .ndo_set_mac_address = myri10ge_set_mac_address, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = myri10ge_busy_poll, -#endif }; static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 90eac63f9606..8e72679c015f 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2265,7 +2265,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget) np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - napi_complete(napi); + napi_complete_done(napi, work_done); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 564f682fa4dc..203abcb0c252 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /*Re Enable MSI-Rx Vector*/ addr = (u8 __iomem *)&bar0->xmsi_mask_reg; addr += 7 - ring->ring_no; @@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) break; } if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ writeq(0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index e07b936f64ec..6a4310af5d97 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget) vxge_hw_vpath_poll_rx(ring->handle); pkts_processed = ring->pkts_processed; - if (ring->pkts_processed < budget_org) { - napi_complete(napi); + if (pkts_processed < budget_org) { + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( @@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) VXGE_COMPLETE_ALL_TX(vdev); if (pkts_processed < budget_org) { - napi_complete(napi); + napi_complete_done(napi, pkts_processed); /* Re enable the Rx interrupts for the ring */ vxge_hw_device_unmask_all(hldev); vxge_hw_device_flush_io(hldev); @@ -3111,7 +3111,7 
@@ static int vxge_change_mtu(struct net_device *dev, int new_mtu) * @stats: pointer to struct rtnl_link_stats64 * */ -static struct rtnl_link_stats64 * +static void vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct vxgedev *vdev = netdev_priv(dev); @@ -3150,8 +3150,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) net_stats->tx_bytes += bytes; net_stats->tx_errors += txstats->tx_errors; } - - return net_stats; } static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e8d448109e03..6ac43abf561b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -42,6 +42,7 @@ */ #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -1459,7 +1460,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, dev_kfree_skb_any(skb); } -static void +static bool nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, @@ -1473,13 +1474,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (unlikely(nfp_net_tx_full(tx_ring, 1))) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); if (unlikely(!new_frag)) { nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return; + return false; } nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); @@ -1509,6 +1510,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, tx_ring->wr_p++; tx_ring->wr_ptr_add++; + return true; } static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) @@ -1613,12 +1615,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) case XDP_PASS: break; case XDP_TX: - nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len); + if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + tx_ring, rxbuf, + pkt_off, pkt_len))) + trace_xdp_exception(nn->netdev, xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(nn->netdev, xdp_prog, act); case XDP_DROP: nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); @@ -2638,8 +2643,8 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); } -static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void nfp_net_stat64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct nfp_net *nn = netdev_priv(netdev); int r; @@ -2669,8 +2674,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } - - return stats; } static bool nfp_net_ebpf_capable(struct nfp_net *nn) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 3913f07279d2..58ba5d3f9e5f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev) * Called with read_lock(&dev_base_lock) held for read - * only synchronized against 
unregister_netdevice. */ -static struct rtnl_link_stats64* +static void nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) __acquires(&netdev_priv(dev)->hwstats_lock) __releases(&netdev_priv(dev)->hwstats_lock) @@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) spin_unlock_bh(&np->hwstats_lock); } - - return storage; } /* @@ -3751,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - napi_complete(napi); + napi_complete_done(napi, rx_work); writel(np->irqmask, base + NvRegIrqMask); } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index dd6b0d0f7fa5..9c7ffd649e9a 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget) rx_done = __lpc_handle_recv(ndev, budget); if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); lpc_eth_enable_int(pldat->net_base); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index d461f419948e..f9e4e8eca665 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2385,7 +2385,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) poll_end_flag = true; if (poll_end_flag) { - napi_complete(napi); + napi_complete_done(napi, work_done); pch_gbe_irq_enable(adapter); } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index badfa1d562a4..49591d9c2e1b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget) pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); if (pkts < budget) { /* all done, no more packets present */ - napi_complete(napi); + napi_complete_done(napi, pkts); pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 561fb94c7267..0cf8a3703275 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data); static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void netxen_nic_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netxen_nic_set_mac(struct net_device *netdev, void *p); /* PCI Device ID Table */ @@ -2302,8 +2302,8 @@ request_reset: clear_bit(__NX_RESETTING, &adapter->state); } -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) +static void netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct netxen_adapter *adapter = netdev_priv(netdev); @@ -2313,8 +2313,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, stats->tx_bytes = adapter->stats.txbytes; stats->rx_dropped = adapter->stats.rxdropped; stats->tx_dropped = 
adapter->stats.txdropped; - - return stats; } static irqreturn_t netxen_intr(int irq, void *data) @@ -2398,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 44c184ebe3b0..1f61cf3209e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_H @@ -27,7 +51,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.10.9.20" +#define DRV_MODULE_VERSION "8.10.10.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 0c42c240b5cf..dcb8fc185df7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 2b8bdaa77800..98f4973cac9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index a4789a93b692..dc0d2c9ad6b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 9ba681643d05..d70300fda020 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3b2250021c5f..33e720143b8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index b6711c106597..5d37ba24da40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 785ab03683eb..5d31189288e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 6e4fae9b1430..1f606516b6aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index d01557092868..9277264d2e65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 23e455f22adc..d891a6852695 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d567ba94c8d1..243b64e0d4dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index 1e832049983d..555dd086796d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index c68dbf7092b1..84310b60849b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 0948be64dc78..0ae0bb4593ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 17a70122df05..3a44d6b395fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 67c25f3db4d5..20c187f4ed0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 6a3727c4c0c6..7520eb34ad00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
 */

#include <linux/types.h>
@@ -74,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 	p_cid->cid = cid;
 	p_cid->vf_qid = vf_qid;
 	p_cid->rel = *p_params;
+	p_cid->p_owner = p_hwfn;
 
 	/* Don't try calculating the absolute indices for VFs */
 	if (IS_VF(p_hwfn->cdev)) {
@@ -248,76 +273,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
 
 static int
 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
 			struct vport_update_ramrod_data *p_ramrod,
-			struct qed_rss_params *p_params)
+			struct qed_rss_params *p_rss)
 {
-	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
-	u16 abs_l2_queue = 0, capabilities = 0;
-	int rc = 0, i;
+	struct eth_vport_rss_config *p_config;
+	u16 capabilities = 0;
+	int i, table_size;
+	int rc = 0;
 
-	if (!p_params) {
+	if (!p_rss) {
 		p_ramrod->common.update_rss_flg = 0;
 		return rc;
 	}
+	p_config = &p_ramrod->rss_config;
 
-	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
-		     ETH_RSS_IND_TABLE_ENTRIES_NUM);
+	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
 
-	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
 	if (rc)
 		return rc;
 
-	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
-	rss->update_rss_capabilities = p_params->update_rss_capabilities;
-	rss->update_rss_ind_table = p_params->update_rss_ind_table;
-	rss->update_rss_key = p_params->update_rss_key;
+	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+	p_config->update_rss_key = p_rss->update_rss_key;
 
-	rss->rss_mode = p_params->rss_enable ?
-			ETH_VPORT_RSS_MODE_REGULAR :
-			ETH_VPORT_RSS_MODE_DISABLED;
+	p_config->rss_mode = p_rss->rss_enable ?
+			     ETH_VPORT_RSS_MODE_REGULAR :
+			     ETH_VPORT_RSS_MODE_DISABLED;
 
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV4));
+		  !!(p_rss->rss_caps & QED_RSS_IPV4));
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV6));
+		  !!(p_rss->rss_caps & QED_RSS_IPV6));
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
 	SET_FIELD(capabilities,
 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
-		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
-	rss->tbl_size = p_params->rss_table_size_log;
+		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+	p_config->tbl_size = p_rss->rss_table_size_log;
 
-	rss->capabilities = cpu_to_le16(capabilities);
+	p_config->capabilities = cpu_to_le16(capabilities);
 
 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
 		   p_ramrod->common.update_rss_flg,
-		   rss->rss_mode, rss->update_rss_capabilities,
-		   capabilities, rss->update_rss_ind_table,
-		   rss->update_rss_key);
+		   p_config->rss_mode,
+		   p_config->update_rss_capabilities,
+		   p_config->capabilities,
+		   p_config->update_rss_ind_table, p_config->update_rss_key);
 
-	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-		rc = qed_fw_l2_queue(p_hwfn,
-				     (u8)p_params->rss_ind_table[i],
-				     &abs_l2_queue);
-		if (rc)
-			return rc;
+	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+			   1 << p_config->tbl_size);
+	for (i = 0; i < table_size; i++) {
+		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+		if (!p_queue)
+			return -EINVAL;
 
-		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
-		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
-			   i, rss->indirection_table[i]);
+		p_config->indirection_table[i] =
+		    cpu_to_le16(p_queue->abs.queue_id);
+	}
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+		   "Configured RSS indirection table [%d entries]:\n",
+		   table_size);
+	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+		DP_VERBOSE(p_hwfn,
+			   NETIF_MSG_IFUP,
+			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+			   le16_to_cpu(p_config->indirection_table[i]),
+			   le16_to_cpu(p_config->indirection_table[i + 1]),
+			   le16_to_cpu(p_config->indirection_table[i + 2]),
+			   le16_to_cpu(p_config->indirection_table[i + 3]),
+			   le16_to_cpu(p_config->indirection_table[i + 4]),
+			   le16_to_cpu(p_config->indirection_table[i + 5]),
+			   le16_to_cpu(p_config->indirection_table[i + 6]),
+			   le16_to_cpu(p_config->indirection_table[i + 7]),
+			   le16_to_cpu(p_config->indirection_table[i + 8]),
+			   le16_to_cpu(p_config->indirection_table[i + 9]),
+			   le16_to_cpu(p_config->indirection_table[i + 10]),
+			   le16_to_cpu(p_config->indirection_table[i + 11]),
+			   le16_to_cpu(p_config->indirection_table[i + 12]),
+			   le16_to_cpu(p_config->indirection_table[i + 13]),
+			   le16_to_cpu(p_config->indirection_table[i + 14]),
+			   le16_to_cpu(p_config->indirection_table[i + 15]));
 	}
 
 	for (i = 0; i < 10; i++)
-		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
 
 	return rc;
 }
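The sizing rule in the hunk above is easy to misread: only 2^rss_table_size_log entries are programmed, clamped to the 128-entry hardware table, and each entry now comes from a queue-cid handle rather than a firmware lookup per index. A minimal plain-C sketch of the clamp (names are illustrative, not the driver's; QED_RSS_IND_TABLE_SIZE mirrors the driver's 128-entry bound):

#include <stdint.h>
#include <stdio.h>

#define QED_RSS_IND_TABLE_SIZE 128	/* upper bound used by the driver */

/* Effective number of indirection entries: 2^tbl_size_log,
 * clamped to the table the hardware actually has.
 */
static int rss_effective_table_size(uint8_t tbl_size_log)
{
	int size = 1 << tbl_size_log;

	return size < QED_RSS_IND_TABLE_SIZE ? size : QED_RSS_IND_TABLE_SIZE;
}

int main(void)
{
	/* log = 7 -> 128 entries; log = 6 -> 64 (the CMT case below) */
	printf("%d %d\n", rss_effective_table_size(7),
	       rss_effective_table_size(6));
	return 0;
}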
@@ -1729,13 +1781,31 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 		int max_vf_mac_filters = 0;
 
 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-			for_each_hwfn(cdev, i)
-				info->num_queues +=
-				    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
-			if (cdev->int_params.fp_msix_cnt)
-				info->num_queues =
-				    min_t(u8, info->num_queues,
-					  cdev->int_params.fp_msix_cnt);
+			u16 num_queues = 0;
+
+			/* Since the feature controls only queue-zones,
+			 * make sure we have the contexts [rx, tx, xdp] to
+			 * match.
+			 */
+			for_each_hwfn(cdev, i) {
+				struct qed_hwfn *hwfn = &cdev->hwfns[i];
+				u16 l2_queues = (u16)FEAT_NUM(hwfn,
+							      QED_PF_L2_QUE);
+				u16 cids;
+
+				cids = hwfn->pf_params.eth_pf_params.num_cons;
+				num_queues += min_t(u16, l2_queues, cids / 3);
+			}
+
+			/* queues might theoretically be >256, but interrupts'
+			 * upper-limit guarantees that it would fit in a u8.
+			 */
+			if (cdev->int_params.fp_msix_cnt) {
+				u8 irqs = cdev->int_params.fp_msix_cnt;
+
+				info->num_queues = (u8)min_t(u16,
+							     num_queues, irqs);
+			}
 		} else {
 			info->num_queues = cdev->num_hwfns;
 		}
@@ -1776,7 +1846,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 	qed_fill_dev_info(cdev, &info->common);
 
 	if (IS_VF(cdev))
-		memset(info->common.hw_mac, 0, ETH_ALEN);
+		eth_zero_addr(info->common.hw_mac);
 
 	return 0;
 }
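The arithmetic in that hunk is worth spelling out: every L2 queue consumes three connection contexts (rx, tx, xdp), so an engine can expose at most num_cons / 3 queues, and the sum over engines is then clamped to the fastpath MSI-X count. This is also why a later hunk in this series (qed_main.c) caps eth_pf_params.num_cons at 192 when RoCE may claim contexts: 192 / 3 = 64 queues per engine. A self-contained sketch of the same computation (hypothetical values, plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Per-engine inputs: queue-zone feature count and L2 connection contexts. */
struct hwfn_caps {
	uint16_t l2_queues;	/* stands in for FEAT_NUM(hwfn, QED_PF_L2_QUE) */
	uint16_t num_cons;	/* stands in for eth_pf_params.num_cons */
};

static uint8_t usable_queues(const struct hwfn_caps *hwfns, int n_hwfns,
			     uint16_t fp_msix_cnt)
{
	uint16_t num_queues = 0;
	int i;

	/* Each queue needs 3 contexts: rx, tx and xdp. */
	for (i = 0; i < n_hwfns; i++) {
		uint16_t by_cids = hwfns[i].num_cons / 3;

		num_queues += hwfns[i].l2_queues < by_cids ?
			      hwfns[i].l2_queues : by_cids;
	}

	/* The MSI-X vector count bounds the result, so it fits in a u8. */
	return (uint8_t)(num_queues < fp_msix_cnt ? num_queues : fp_msix_cnt);
}

int main(void)
{
	struct hwfn_caps caps[2] = { { 64, 192 }, { 64, 192 } };

	printf("%u\n", usable_queues(caps, 2, 96));	/* prints 96 */
	return 0;
}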
@@ -1857,18 +1927,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
 	return 0;
 }
 
+static int qed_update_vport_rss(struct qed_dev *cdev,
+				struct qed_update_vport_rss_params *input,
+				struct qed_rss_params *rss)
+{
+	int i, fn;
+
+	/* Update configuration with what's correct regardless of CMT */
+	rss->update_rss_config = 1;
+	rss->rss_enable = 1;
+	rss->update_rss_capabilities = 1;
+	rss->update_rss_ind_table = 1;
+	rss->update_rss_key = 1;
+	rss->rss_caps = input->rss_caps;
+	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+	/* In regular scenario, we'd simply need to take input handlers.
+	 * But in CMT, we'd have to split the handlers according to the
+	 * engine they were configured on. We'd then have to understand
+	 * whether RSS is really required, since 2-queues on CMT doesn't
+	 * require RSS.
+	 */
+	if (cdev->num_hwfns == 1) {
+		memcpy(rss->rss_ind_table,
+		       input->rss_ind_table,
+		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+		rss->rss_table_size_log = 7;
+		return 0;
+	}
+
+	/* Start by copying the non-specific information to the 2nd copy */
+	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+	/* CMT should be round-robin */
+	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+		struct qed_queue_cid *cid = input->rss_ind_table[i];
+		struct qed_rss_params *t_rss;
+
+		if (cid->p_owner == QED_LEADING_HWFN(cdev))
+			t_rss = &rss[0];
+		else
+			t_rss = &rss[1];
+
+		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+	}
+
+	/* Make sure RSS is actually required */
+	for_each_hwfn(cdev, fn) {
+		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+			if (rss[fn].rss_ind_table[i] !=
+			    rss[fn].rss_ind_table[0])
+				break;
+		}
+		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
+			return -EINVAL;
+		}
+		rss[fn].rss_table_size_log = 6;
+	}
+
+	return 0;
+}
+
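The round-robin split above pairs with the qed_l2.h change later in this patch that turns rss_ind_table into an array of rx-queue handles: entry i of the flat table lands in slot i / num_hwfns of the table belonging to the engine that owns the queue, which on a 2-engine (CMT) adapter yields two 64-entry tables (table_size_log = 6). A compact sketch of the distribution under those assumptions (a stand-in struct replaces struct qed_queue_cid):

#include <stddef.h>
#include <stdio.h>

#define IND_TABLE_SIZE	128
#define NUM_ENGINES	2	/* CMT: two hwfns behind one port */

/* Stand-in for a rx-queue handle; 'owner' plays the role of p_owner. */
struct queue_handle {
	int owner;
	int queue_id;
};

/* Split one flat indirection table into per-engine tables using the
 * same slot compression as the driver: entry i goes to slot
 * i / NUM_ENGINES of the owning engine's table.
 */
static void split_ind_table(struct queue_handle *in[IND_TABLE_SIZE],
			    struct queue_handle *out[NUM_ENGINES][IND_TABLE_SIZE])
{
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		out[in[i]->owner][i / NUM_ENGINES] = in[i];
}

int main(void)
{
	static struct queue_handle queues[IND_TABLE_SIZE];
	struct queue_handle *in[IND_TABLE_SIZE];
	struct queue_handle *out[NUM_ENGINES][IND_TABLE_SIZE] = { { NULL } };
	int i;

	/* Upper layer spread 8 queues round-robin across both engines. */
	for (i = 0; i < IND_TABLE_SIZE; i++) {
		queues[i].owner = i & 1;
		queues[i].queue_id = (i % 8) / 2;
		in[i] = &queues[i];
	}

	split_ind_table(in, out);
	printf("engine0 slot0 -> queue %d\n", out[0][0]->queue_id);
	return 0;
}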
 static int qed_update_vport(struct qed_dev *cdev,
 			    struct qed_update_vport_params *params)
 {
 	struct qed_sp_vport_update_params sp_params;
-	struct qed_rss_params sp_rss_params;
-	int rc, i;
+	struct qed_rss_params *rss;
+	int rc = 0, i;
 
 	if (!cdev)
 		return -ENODEV;
 
+	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
+	if (!rss)
+		return -ENOMEM;
+
 	memset(&sp_params, 0, sizeof(sp_params));
-	memset(&sp_rss_params, 0, sizeof(sp_rss_params));
 
 	/* Translate protocol params into sp params */
 	sp_params.vport_id = params->vport_id;
@@ -1882,66 +2018,24 @@ static int qed_update_vport(struct qed_dev *cdev,
 	sp_params.update_accept_any_vlan_flg =
 		params->update_accept_any_vlan_flg;
 
-	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
-	 * We need to re-fix the rss values per engine for CMT.
-	 */
-	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
-		struct qed_update_vport_rss_params *rss = &params->rss_params;
-		int k, max = 0;
-
-		/* Find largest entry, since it's possible RSS needs to
-		 * be disabled [in case only 1 queue per-hwfn]
-		 */
-		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-			max = (max > rss->rss_ind_table[k]) ?
-				max : rss->rss_ind_table[k];
-
-		/* Either fix RSS values or disable RSS */
-		if (cdev->num_hwfns < max + 1) {
-			int divisor = (max + cdev->num_hwfns - 1) /
-				cdev->num_hwfns;
-
-			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-				   "CMT - fixing RSS values (modulo %02x)\n",
-				   divisor);
-
-			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-				rss->rss_ind_table[k] =
-					rss->rss_ind_table[k] % divisor;
-		} else {
-			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
+	/* Prepare the RSS configuration */
+	if (params->update_rss_flg)
+		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
 			params->update_rss_flg = 0;
-		}
-	}
-
-	/* Now, update the RSS configuration for actual configuration */
-	if (params->update_rss_flg) {
-		sp_rss_params.update_rss_config = 1;
-		sp_rss_params.rss_enable = 1;
-		sp_rss_params.update_rss_capabilities = 1;
-		sp_rss_params.update_rss_ind_table = 1;
-		sp_rss_params.update_rss_key = 1;
-		sp_rss_params.rss_caps = params->rss_params.rss_caps;
-		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
-		memcpy(sp_rss_params.rss_ind_table,
-		       params->rss_params.rss_ind_table,
-		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
-		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
-		       QED_RSS_KEY_SIZE * sizeof(u32));
-		sp_params.rss_params = &sp_rss_params;
-	}
 
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+		if (params->update_rss_flg)
+			sp_params.rss_params = &rss[i];
+
 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
 					 QED_SPQ_MODE_EBLOCK, NULL);
 		if (rc) {
 			DP_ERR(cdev, "Failed to update VPORT\n");
-			return rc;
+			goto out;
 		}
 
 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
@@ -1950,7 +2044,9 @@ static int qed_update_vport(struct qed_dev *cdev,
 			   params->update_vport_active_flg);
 	}
 
-	return 0;
+out:
+	vfree(rss);
+	return rc;
 }
 
 static int qed_start_rxq(struct qed_dev *cdev,
@@ -2114,11 +2210,14 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 			QED_ACCEPT_MCAST_MATCHED |
 			QED_ACCEPT_BCAST;
 
-	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
 						 QED_ACCEPT_MCAST_UNMATCHED;
-	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+	}
 
 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
 				     QED_SPQ_MODE_CB, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 48c9bfc28140..93cb932ef663 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
 *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and /or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
 */
 #ifndef _QED_L2_H
 #define _QED_L2_H
@@ -15,6 +39,20 @@
 #include "qed.h"
 #include "qed_hw.h"
 #include "qed_sp.h"
+struct qed_rss_params {
+	u8 update_rss_config;
+	u8 rss_enable;
+	u8 rss_eng_id;
+	u8 update_rss_capabilities;
+	u8 update_rss_ind_table;
+	u8 update_rss_key;
+	u8 rss_caps;
+	u8 rss_table_size_log;
+
+	/* Indirection table consist of rx queue handles */
+	void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+	u32 rss_key[QED_RSS_KEY_SIZE];
+};
 
 struct qed_sge_tpa_params {
 	u8 max_buffers_per_cqe;
@@ -132,18 +170,6 @@ struct qed_sp_vport_start_params {
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 			   struct qed_sp_vport_start_params *p_params);
 
-struct qed_rss_params {
-	u8 update_rss_config;
-	u8 rss_enable;
-	u8 rss_eng_id;
-	u8 update_rss_capabilities;
-	u8 update_rss_ind_table;
-	u8 update_rss_key;
-	u8 rss_caps;
-	u8 rss_table_size_log;
-	u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
-	u32 rss_key[QED_RSS_KEY_SIZE];
-};
 
 struct qed_filter_accept_flags {
 	u8 update_rx_mode_config;
@@ -263,6 +289,8 @@ struct qed_queue_cid {
 
 	/* Legacy VFs might have Rx producer located elsewhere */
 	bool b_legacy_vf;
+
+	struct qed_hwfn *p_owner;
 };
 
 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 873ce2cd76ba..02c5d47cfc6d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
 *
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
 *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 31417928b635..db3e4fc78e09 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index aeb98d8c5626..93eee83ccdc3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. 
+ * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/stddef.h> @@ -853,6 +877,17 @@ static void qed_update_pf_params(struct qed_dev *cdev, params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } + /* In case we might support RDMA, don't allow qede to be greedy + * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. + */ + if (QED_LEADING_HWFN(cdev)->hw_info.personality == + QED_PCI_ETH_ROCE) { + u16 *num_cons; + + num_cons = ¶ms->eth_pf_params.num_cons; + *num_cons = min_t(u16, *num_cons, 192); + } + for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6dd3ce443484..c8a877594032 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> @@ -1098,7 +1122,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + if (!IS_ENABLED(CONFIG_QED_RDMA)) + *p_proto = QED_PCI_ETH; + else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 407a2c1830fb..363dce0f16b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 155abcb507fd..7d731c6cb892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 7a0670a9a074..4f138fb5f533 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 97544205a8c1..b6722c6ff761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 2dbdb3298991..c3c8c5018e93 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 279f342af8db..36cf4b2ab7fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index 48bfaecaf6dc..1bafc05db2b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,3 +1,35 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + #include <linux/crc32.h> #include "qed.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 9c897bc68d05..043882959606 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a39ef2e7a9a6..097a72987572 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f022469bdcf8..645328a9f0cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 85b09dd1787a..3f4bf31f45e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,13 +1,38 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
 */
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
+#include <linux/vmalloc.h>
 #include <linux/qed/qed_iov_if.h>
 #include "qed_cxt.h"
 #include "qed_hsi.h"
@@ -1199,7 +1224,10 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
 		return;
 
 	/* Clear the VF mac */
-	memset(vf_info->mac, 0, ETH_ALEN);
+	eth_zero_addr(vf_info->mac);
+
+	vf_info->rx_accept_mode = 0;
+	vf_info->tx_accept_mode = 0;
 }
 
 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
@@ -2294,12 +2322,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
 			    struct qed_vf_info *vf,
 			    struct qed_sp_vport_update_params *p_data,
 			    struct qed_rss_params *p_rss,
-			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+			    struct qed_iov_vf_mbx *p_mbx,
+			    u16 *tlvs_mask, u16 *tlvs_accepted)
 {
 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
-	u16 i, q_idx, max_q_idx;
+	bool b_reject = false;
 	u16 table_size;
+	u16 i, q_idx;
 
 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2323,34 +2353,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
 	p_rss->rss_caps = p_rss_tlv->rss_caps;
 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
-	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
-	       sizeof(p_rss->rss_ind_table));
 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
 
 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
 			   (1 << p_rss_tlv->rss_table_size_log));
 
-	max_q_idx = ARRAY_SIZE(vf->vf_queues);
-
 	for (i = 0; i < table_size; i++) {
-		u16 index = vf->vf_queues[0].fw_rx_qid;
+		q_idx = p_rss_tlv->rss_ind_table[i];
+		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+				   vf->relative_vf_id, q_idx);
+			b_reject = true;
+			goto out;
+		}
 
-		q_idx = p_rss->rss_ind_table[i];
-		if (q_idx >= max_q_idx)
-			DP_NOTICE(p_hwfn,
-				  "rss_ind_table[%d] = %d, rxq is out of range\n",
-				  i, q_idx);
-		else if (!vf->vf_queues[q_idx].p_rx_cid)
-			DP_NOTICE(p_hwfn,
-				  "rss_ind_table[%d] = %d, rxq is not active\n",
-				  i, q_idx);
-		else
-			index = vf->vf_queues[q_idx].fw_rx_qid;
-		p_rss->rss_ind_table[i] = index;
+		if (!vf->vf_queues[q_idx].p_rx_cid) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+				   vf->relative_vf_id, q_idx);
+			b_reject = true;
+			goto out;
+		}
+
+		p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
 	}
 
 	p_data->rss_params = p_rss;
+out:
 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+	if (!b_reject)
+		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
 }
 
 static void
@@ -2401,16 +2436,49 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
 }
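The PF-side validation above changes behavior: instead of silently "fixing" a bad VF indirection entry to queue 0, the whole RSS TLV is now rejected, while the mask still records that the TLV was present so the response can tell the VF its request was seen but refused. A minimal sketch of that mask/accepted split (illustrative bit value and helper names, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLV_RSS		(1 << 0)	/* stand-in for QED_IOV_VP_UPDATE_RSS */
#define NUM_VF_QUEUES	16

/* Validate a VF-supplied indirection table against the VF's active rxqs.
 * 'mask' records that the TLV was seen; 'accepted' is set only when
 * every entry passed validation.
 */
static void update_rss_param(const uint16_t *ind_table, int table_size,
			     const bool *rxq_active,
			     uint16_t *mask, uint16_t *accepted)
{
	bool reject = false;
	int i;

	for (i = 0; i < table_size; i++) {
		uint16_t q = ind_table[i];

		if (q >= NUM_VF_QUEUES || !rxq_active[q]) {
			printf("omitting RSS due to bad queue %04x\n", q);
			reject = true;
			break;
		}
	}

	*mask |= TLV_RSS;
	if (!reject)
		*accepted |= TLV_RSS;
}

int main(void)
{
	bool active[NUM_VF_QUEUES] = { [0] = true, [1] = true };
	uint16_t table[] = { 0, 1, 0, 1 };
	uint16_t mask = 0, accepted = 0;

	update_rss_param(table, 4, active, &mask, &accepted);
	printf("mask %#x accepted %#x\n", mask, accepted);
	return 0;
}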
+ */ + if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) + return 0; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (flags->update_rx_mode_config) { + vf_info->rx_accept_mode = flags->rx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->rx_accept_filter &= ~mask; + } + + if (flags->update_tx_mode_config) { + vf_info->tx_accept_mode = flags->tx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->tx_accept_filter &= ~mask; + } + + return 0; +} + static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { + struct qed_rss_params *p_rss_params = NULL; struct qed_sp_vport_update_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct qed_sge_tpa_params sge_tpa_params; - struct qed_rss_params rss_params; + u16 tlvs_mask = 0, tlvs_accepted = 0; u8 status = PFVF_STATUS_SUCCESS; - u16 tlvs_mask = 0; u16 length; int rc; @@ -2423,6 +2491,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; goto out; } + p_rss_params = vzalloc(sizeof(*p_rss_params)); + if (p_rss_params == NULL) { + status = PFVF_STATUS_FAILURE; + goto out; + } memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; @@ -2437,20 +2510,33 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); - qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params, - mbx, &tlvs_mask); qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params, &sge_tpa_params, mbx, &tlvs_mask); - /* Just log a message if there is no single extended tlv in buffer. - * When all features of vport update ramrod would be requested by VF - * as extended TLVs in buffer then an error can be returned in response - * if there is no extended TLV present in buffer. + tlvs_accepted = tlvs_mask; + + /* Some of the extended TLVs need to be validated first; In that case, + * they can update the mask without updating the accepted [so that + * PF could communicate to VF it has rejected request].
*/ - if (!tlvs_mask) { - DP_NOTICE(p_hwfn, - "No feature tlvs found for vport update\n"); + qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params, + mbx, &tlvs_mask, &tlvs_accepted); + + if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, + &params, &tlvs_accepted)) { + tlvs_accepted = 0; + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + if (!tlvs_accepted) { + if (tlvs_mask) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Upper-layer prevents VF vport configuration\n"); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No feature tlvs found for vport update\n"); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } @@ -2461,8 +2547,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, status = PFVF_STATUS_FAILURE; out: + vfree(p_rss_params); length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, - tlvs_mask, tlvs_mask); + tlvs_mask, tlvs_accepted); qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } @@ -2539,8 +2626,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(p_vf->shadow_config.macs[i], p_params->mac)) { - memset(p_vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); break; } } @@ -2553,7 +2639,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) - memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + eth_zero_addr(p_vf->shadow_config.macs[i]); } /* List the new MAC address */ @@ -3892,6 +3978,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev, return 0; } +static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(hwfn, vfid)) { + DP_NOTICE(hwfn, + "SR-IOV sanity check failed, can't set trust\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (vf->is_trusted_request == trust) + return 0; + vf->is_trusted_request = trust; + + qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); + } + + return 0; +} + static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; @@ -3996,6 +4108,61 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) +{ + struct qed_sp_vport_update_params params; + struct qed_filter_accept_flags *flags; + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 mask; + int i; + + mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + flags = &params.accept_flags; + + qed_for_each_vf(hwfn, i) { + /* Need to make sure current requested configuration didn't + * flip so that we'll end up configuring something that's not + * needed.
+ */ + vf_info = qed_iov_get_public_vf_info(hwfn, i, true); + if (vf_info->is_trusted_configured == + vf_info->is_trusted_request) + continue; + vf_info->is_trusted_configured = vf_info->is_trusted_request; + + /* Validate that the VF has a configured vport */ + vf = qed_iov_get_vf_info(hwfn, i, true); + if (!vf->vport_instance) + continue; + + memset(&params, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + + if (vf_info->rx_accept_mode & mask) { + flags->update_rx_mode_config = 1; + flags->rx_accept_filter = vf_info->rx_accept_mode; + } + + if (vf_info->tx_accept_mode & mask) { + flags->update_tx_mode_config = 1; + flags->tx_accept_filter = vf_info->tx_accept_mode; + } + + /* Remove if needed; Otherwise this would set the mask */ + if (!vf_info->is_trusted_configured) { + flags->rx_accept_filter &= ~mask; + flags->tx_accept_filter &= ~mask; + } + + if (flags->update_rx_mode_config || + flags->update_tx_mode_config) + qed_sp_vport_update(hwfn, &params, + QED_SPQ_MODE_EBLOCK, NULL); + } +} + static void qed_iov_pf_task(struct work_struct *work) { @@ -4031,6 +4198,9 @@ static void qed_iov_pf_task(struct work_struct *work) if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags)) qed_handle_bulletin_post(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) + qed_iov_handle_trust_change(hwfn); } void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) @@ -4093,4 +4263,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = { .set_link_state = &qed_set_vf_link_state, .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, + .set_trust = &qed_set_vf_trust, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 509c02b4772e..0a2e3a36d2cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_SRIOV_H @@ -56,6 +80,14 @@ struct qed_public_vf_info { /* Currently configured Tx rate in MB/sec. 0 if unconfigured */ int tx_rate; + + /* Trusted VFs can configure promiscuous mode. + * Also store shadow promisc configuration if needed. + */ + bool is_trusted_configured; + bool is_trusted_request; + u8 rx_accept_mode; + u8 tx_accept_mode; }; struct qed_iov_vf_init_params { @@ -221,6 +253,7 @@ enum qed_iov_wq_flag { QED_IOV_WQ_BULLETIN_UPDATE_FLAG, QED_IOV_WQ_STOP_WQ_FLAG, QED_IOV_WQ_FLR_FLAG, + QED_IOV_WQ_TRUST_FLAG, }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 60b31a8ede73..9667059b15bd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #include <linux/crc32.h> @@ -814,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, if (p_params->rss_params) { struct qed_rss_params *rss_params = p_params->rss_params; struct vfpf_vport_update_rss_tlv *p_rss_tlv; + int i, table_size; size = sizeof(struct vfpf_vport_update_rss_tlv); p_rss_tlv = qed_add_tlv(p_hwfn, @@ -836,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, p_rss_tlv->rss_enable = rss_params->rss_enable; p_rss_tlv->rss_caps = rss_params->rss_caps; p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; - memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, - sizeof(rss_params->rss_ind_table)); + + table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE, + 1 << p_rss_tlv->rss_table_size_log); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue; + + p_queue = rss_params->rss_ind_table[i]; + p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; + } memcpy(p_rss_tlv->rss_key, rss_params->rss_key, sizeof(rss_params->rss_key)); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 11eb3854e6f2..7da0b165d8bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_VF_H diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 048a230c3ce0..38fbee6a442b 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDE) := qede.o -qede-y := qede_main.o qede_ethtool.o +qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_QED_RDMA) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index c79dc78746fc..b4234066689b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #ifndef _QEDE_H_ #define _QEDE_H_ #include <linux/compiler.h> @@ -26,7 +49,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 9 +#define QEDE_REVISION_VERSION 10 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." 
\ @@ -141,6 +164,7 @@ struct qede_dev { u16 num_queues; #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_RX_QUEUE_IDX(edev, i) (i) #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) struct qed_int_info int_info; @@ -171,7 +195,10 @@ struct qede_dev { #define QEDE_RSS_KEY_INITED BIT(1) #define QEDE_RSS_CAPS_INITED BIT(2) u32 rss_params_inited; /* bit-field to track initialized rss params */ - struct qed_update_vport_rss_params rss_params; + u16 rss_ind_table[128]; + u32 rss_key[10]; + u8 rss_caps; + u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ @@ -257,7 +284,7 @@ struct qede_rx_queue { u16 sw_rx_cons; u16 sw_rx_prod; - u16 num_rx_buffers; /* Slowpath */ + u16 filled_buffers; u8 data_direction; u8 rxq_id; @@ -270,6 +297,9 @@ struct qede_rx_queue { struct qed_chain rx_bd_ring; struct qed_chain rx_comp_ring ____cacheline_aligned; + /* Used once per each NAPI run */ + u16 num_rx_buffers; + /* GRO */ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; @@ -385,9 +415,42 @@ struct qede_reload_args { } u; }; +/* Datapath functions definition */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp); +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy); +int qede_free_tx_pkt(struct qede_dev *edev, + struct qede_tx_queue *txq, int *len); +int qede_poll(struct napi_struct *napi, int budget); +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie); + +/* Filtering function definitions */ +void qede_force_mac(void *dev, u8 *mac, bool forced); +int qede_set_mac_addr(struct net_device *ndev, void *p); + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid); +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid); +void qede_vlan_mark_nonconfigured(struct qede_dev *edev); +int qede_configure_vlan_filters(struct qede_dev *edev); + +int qede_set_features(struct net_device *dev, netdev_features_t features); +void qede_set_rx_mode(struct net_device *ndev); +void qede_config_rx_mode(struct net_device *ndev); +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update); + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp); + #ifdef CONFIG_DCB void qede_set_dcbnl_ops(struct net_device *ndev); #endif + void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1c48f445c93b..baf264225c12 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. 
-*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #include <linux/version.h> #include <linux/types.h> #include <linux/netdevice.h> @@ -14,6 +37,7 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/capability.h> +#include <linux/vmalloc.h> #include "qede.h" #define QEDE_RQSTAT_OFFSET(stat_name) \ @@ -908,8 +932,7 @@ static int qede_set_channels(struct net_device *dev, /* Reset the indirection table if rx queue count is updated */ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; - memset(&edev->rss_params.rss_ind_table, 0, - sizeof(edev->rss_params.rss_ind_table)); + memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table)); } qede_reload(edev, NULL, false); @@ -955,11 +978,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP) + if (edev->rss_caps & QED_RSS_IPV4_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP) + if (edev->rss_caps & QED_RSS_IPV6_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case IPV4_FLOW: @@ -992,8 +1015,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; u8 set_caps = 0, clr_caps = 0; + int rc = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Set rss flags command parameters: flow type = %d, data = %llu\n", @@ -1068,27 +1092,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } /* No action is needed if there is no change in the rss capability */ - if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps & - ~clr_caps) | set_caps)) + if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps)) return 0; /* Update internal configuration */ - edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) | - set_caps; + edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps); 
edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; /* Re-configure if possible */ - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) @@ -1113,7 +1139,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); - return sizeof(edev->rss_params.rss_key); + return sizeof(edev->rss_key); } static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) @@ -1128,11 +1154,10 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) return 0; for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - indir[i] = edev->rss_params.rss_ind_table[i]; + indir[i] = edev->rss_ind_table[i]; if (key) - memcpy(key, edev->rss_params.rss_key, - qede_get_rxfh_key_size(dev)); + memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev)); return 0; } @@ -1140,9 +1165,9 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) static int qede_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qede_dev *edev = netdev_priv(dev); - int i; + int i, rc = 0; if (edev->dev_info.common.num_hwfns > 1) { DP_INFO(edev, @@ -1158,27 +1183,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, if (indir) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) - edev->rss_params.rss_ind_table[i] = indir[i]; + edev->rss_ind_table[i] = indir[i]; edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; } if (key) { - memcpy(&edev->rss_params.rss_key, key, - qede_get_rxfh_key_size(dev)); + memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev)); edev->rss_params_inited |= QEDE_RSS_KEY_INITED; } - if (netif_running(edev->ndev)) { - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.update_rss_flg = 1; - vport_update_params.vport_id = 0; - memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - return edev->ops->vport_update(edev->cdev, - &vport_update_params); + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) { + __qede_unlock(edev); + return -ENOMEM; + } + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); + rc = edev->ops->vport_update(edev->cdev, vport_update_params); + vfree(vport_update_params); } + __qede_unlock(edev); - return 0; + return rc; } /* This function enables the interrupt generation and the NAPI on the device */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c new file 
mode 100644 index 000000000000..107c3fda4792 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -0,0 +1,759 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <net/udp_tunnel.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> + +#include <linux/qed/qed_if.h> +#include "qede.h" + +void qede_force_mac(void *dev, u8 *mac, bool forced) +{ + struct qede_dev *edev = dev; + + /* MAC hints take effect only if we haven't set one already */ + if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) + return; + + ether_addr_copy(edev->ndev->dev_addr, mac); + ether_addr_copy(edev->primary_mac, mac); +} + +void qede_fill_rss_params(struct qede_dev *edev, + struct qed_update_vport_rss_params *rss, u8 *update) +{ + bool need_reset = false; + int i; + + if (QEDE_RSS_COUNT(edev) <= 1) { + memset(rss, 0, sizeof(*rss)); + *update = 0; + return; + } + + /* Need to validate current RSS config uses valid entries */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) { + need_reset = true; + break; + } + } + + if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) { + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 indir_val, val; + + val = QEDE_RSS_COUNT(edev); + indir_val = ethtool_rxfh_indir_default(i, val); + edev->rss_ind_table[i] = indir_val; + } + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + /* Now that we have the queue-indirection, prepare the handles */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]); + + rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle; + } + + if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { + netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key)); + + if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { + edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + } + rss->rss_caps = 
edev->rss_caps; + + *update = 1; +} + +static int qede_set_ucast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char mac[ETH_ALEN]) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.mac_valid = 1; + ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params *params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return 0; + + params = vzalloc(sizeof(*params)); + if (!params) + return -ENOMEM; + + params->vport_id = 0; + params->accept_any_vlan = action; + params->update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, params); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? "enabled" : "disabled"); + edev->accept_any_vlan = action; + } + + vfree(params); + return 0; +} + +int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + __qede_lock(edev); + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + goto out; + } + + /* Check for the filter limit. 
+ * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + goto out; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) { + rc = qede_config_accept_any_vlan(edev, true); + if (rc) { + kfree(vlan); + goto out; + } + } + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + +out: + __qede_unlock(edev); + return rc; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we have more VLANs than credits, + * or remove accept_any_vlan mode if we've actually removed + * a non-configured vlan, and all remaining vlans are truly configured. + */ + + if (accept_any_vlan) + rc = qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + rc = qede_config_accept_any_vlan(edev, false); + + if (rc && !real_rc) + real_rc = rc; + + return real_rc; +} + +int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc = 0; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + __qede_lock(edev); + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + goto out; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. 
So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + goto out; + } + + /* Remove vlan */ + if (vlan->configured) { + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, + vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + goto out; + } + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + +out: + __qede_unlock(edev); + return rc; +} + +void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", vlan->vid); + } + + edev->accept_any_vlan = false; +} + +static void qede_set_features_reload(struct qede_dev *edev, + struct qede_reload_args *args) +{ + edev->ndev->features = args->u.features; +} + +int qede_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool need_reload = false; + + /* No action needed if hardware GRO is disabled during driver load */ + if (changes & NETIF_F_GRO) { + if (dev->features & NETIF_F_GRO) + need_reload = !edev->gro_disable; + else + need_reload = edev->gro_disable; + } + + if (need_reload) { + struct qede_reload_args args; + + args.u.features = features; + args.func = &qede_set_features_reload; + + /* Make sure that we definitely need to reload. + * In case of an eBPF attached program, there will be no FW + * aggregations, so no need to actually reload. 
+ */ + __qede_lock(edev); + if (edev->xdp_prog) + args.func(edev, &args); + else + qede_reload(edev, &args, true); + __qede_unlock(edev); + + return 1; + } + + return 0; +} + +void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return; + + edev->geneve_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", + t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return; + + edev->geneve_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } + + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_xdp_reload_func(struct qede_dev *edev, + struct qede_reload_args *args) +{ + struct bpf_prog *old; + + old = xchg(&edev->xdp_prog, args->u.new_prog); + if (old) + bpf_prog_put(old); +} + +static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) +{ + struct qede_reload_args args; + + if (prog && prog->xdp_adjust_head) { + DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + + /* If we're called, there was already a bpf reference increment */ + args.func = &qede_xdp_reload_func; + args.u.new_prog = prog; + qede_reload(edev, &args, false); + + return 0; +} + +int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return qede_xdp_set(edev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!edev->xdp_prog; + return 0; + default: + return -EINVAL; + } +} + +static int qede_set_mcast_rx_mac(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + unsigned char *mac, int num_macs) +{ + struct qed_filter_params filter_cmd; + int i; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_MCAST; + filter_cmd.filter.mcast.type = opcode; + filter_cmd.filter.mcast.num = num_macs; + + for (i = 0; i < num_macs; i++, mac += ETH_ALEN) + ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + +int qede_set_mac_addr(struct net_device *ndev, void *p) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct sockaddr *addr = p; + int rc; + + ASSERT_RTNL(); /* @@@TBD To be removed */ + + DP_INFO(edev, "Set_mac_addr called\n"); + + if (!is_valid_ether_addr(addr->sa_data)) { + DP_NOTICE(edev, "The MAC address is not valid\n"); + return -EFAULT; + } + + if (!edev->ops->check_mac(edev->cdev, 
addr->sa_data)) { + DP_NOTICE(edev, "qed prevents setting MAC\n"); + return -EINVAL; + } + + ether_addr_copy(ndev->dev_addr, addr->sa_data); + + if (!netif_running(ndev)) { + DP_NOTICE(edev, "The device is currently down\n"); + return 0; + } + + /* Remove the previous primary mac */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + edev->primary_mac); + if (rc) + return rc; + + edev->ops->common->update_mac(edev->cdev, addr->sa_data); + + /* Add MAC filter according to the new unicast HW MAC address */ + ether_addr_copy(edev->primary_mac, ndev->dev_addr); + return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + edev->primary_mac); +} + +static int +qede_configure_mcast_filtering(struct net_device *ndev, + enum qed_filter_rx_mode_type *accept_flags) +{ + struct qede_dev *edev = netdev_priv(ndev); + unsigned char *mc_macs, *temp; + struct netdev_hw_addr *ha; + int rc = 0, mc_count; + size_t size; + + size = 64 * ETH_ALEN; + + mc_macs = kzalloc(size, GFP_KERNEL); + if (!mc_macs) { + DP_NOTICE(edev, + "Failed to allocate memory for multicast MACs\n"); + rc = -ENOMEM; + goto exit; + } + + temp = mc_macs; + + /* Remove all previously configured MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, + mc_macs, 1); + if (rc) + goto exit; + + netif_addr_lock_bh(ndev); + + mc_count = netdev_mc_count(ndev); + if (mc_count < 64) { + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + } + + netif_addr_unlock_bh(ndev); + + /* Check for all multicast @@@TBD resource allocation */ + if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) { + if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) + *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + } else { + /* Add all multicast MAC filters */ + rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, + mc_macs, mc_count); + } + +exit: + kfree(mc_macs); + return rc; +} + +void qede_set_rx_mode(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +/* Must be called with qede_lock held */ +void qede_config_rx_mode(struct net_device *ndev) +{ + enum qed_filter_rx_mode_type accept_flags; + struct qede_dev *edev = netdev_priv(ndev); + struct qed_filter_params rx_mode; + unsigned char *uc_macs, *temp; + struct netdev_hw_addr *ha; + int rc, uc_count; + size_t size; + + netif_addr_lock_bh(ndev); + + uc_count = netdev_uc_count(ndev); + size = uc_count * ETH_ALEN; + + uc_macs = kzalloc(size, GFP_ATOMIC); + if (!uc_macs) { + DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); + netif_addr_unlock_bh(ndev); + return; + } + + temp = uc_macs; + netdev_for_each_uc_addr(ha, ndev) { + ether_addr_copy(temp, ha->addr); + temp += ETH_ALEN; + } + + netif_addr_unlock_bh(ndev); + + /* Configure the struct for the Rx mode */ + memset(&rx_mode, 0, sizeof(struct qed_filter_params)); + rx_mode.type = QED_FILTER_TYPE_RX_MODE; + + /* Remove all previous unicast secondary macs and multicast macs + * (configrue / leave the primary mac) + */ + rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, + edev->primary_mac); + if (rc) + goto out; + + /* Check for promiscuous */ + if (ndev->flags & IFF_PROMISC) + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + else + accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR; + + /* Configure all filters regardless, in case promisc is rejected */ + if (uc_count < edev->dev_info.num_mac_filters) { + int i; + + temp = uc_macs; 
+ for (i = 0; i < uc_count; i++) { + rc = qede_set_ucast_rx_mac(edev, + QED_FILTER_XCAST_TYPE_ADD, + temp); + if (rc) + goto out; + + temp += ETH_ALEN; + } + } else { + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + } + + rc = qede_configure_mcast_filtering(ndev, &accept_flags); + if (rc) + goto out; + + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. + */ + qede_config_accept_any_vlan(edev, false); + } + + rx_mode.filter.accept_flags = accept_flags; + edev->ops->filter_config(edev->cdev, &rx_mode); +out: + kfree(uc_macs); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c new file mode 100644 index 000000000000..26848eed3bc1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -0,0 +1,1695 @@ +/* QLogic qede NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/bpf_trace.h> +#include <net/udp_tunnel.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <net/ip6_checksum.h> + +#include <linux/qed/qed_if.h> +#include "qede.h" +/********************************* + * Content also used by slowpath * + *********************************/ + +int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) +{ + struct sw_rx_data *sw_rx_data; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + struct page *data; + + /* In case lazy-allocation is allowed, postpone allocation until the + * end of the NAPI run. We'd still need to make sure the Rx ring has + * sufficient buffers to guarantee an additional Rx interrupt. 
+ */ + if (allow_lazy && likely(rxq->filled_buffers > 12)) { + rxq->filled_buffers--; + return 0; + } + + data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!data)) + return -ENOMEM; + + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. + */ + mapping = dma_map_page(rxq->dev, data, 0, + PAGE_SIZE, rxq->data_direction); + if (unlikely(dma_mapping_error(rxq->dev, mapping))) { + __free_page(data); + return -ENOMEM; + } + + sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; + sw_rx_data->data = data; + sw_rx_data->mapping = mapping; + + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); + WARN_ON(!rx_bd); + rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + + rxq->sw_rx_prod++; + rxq->filled_buffers++; + + return 0; +} + +/* Unmap the data and free skb */ +int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) +{ + u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_bd *tx_data_bd; + int bds_consumed = 0; + int nbds; + bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; + int i, split_bd_len = 0; + + if (unlikely(!skb)) { + DP_ERR(edev, + "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", + idx, txq->sw_tx_cons, txq->sw_tx_prod); + return -1; + } + + *len = skb->len; + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + bds_consumed++; + + nbds = first_bd->data.nbds; + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + bds_consumed++; + } + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + while (bds_consumed++ < nbds) + qed_chain_consume(&txq->tx_pbl); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; + + return 0; +} + +/* Unmap the data and free skb when mapping failed during start_xmit */ +static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, + struct eth_tx_1st_bd *first_bd, + int nbd, bool data_split) +{ + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; + struct eth_tx_bd *tx_data_bd; + int i, split_bd_len = 0; + + /* Return prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + if (data_split) { + struct eth_tx_bd *split = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + split_bd_len = BD_UNMAP_LEN(split); + nbd--; + } + + dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); + + /* Unmap the data of the skb frags */ + for (i = 0; i < nbd; i++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + if (tx_data_bd->nbytes) + dma_unmap_page(txq->dev, + BD_UNMAP_ADDR(tx_data_bd), + 
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + } + + /* Return again prod to its position before this skb was handled */ + qed_chain_set_prod(&txq->tx_pbl, + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); + + /* Free skb */ + dev_kfree_skb_any(skb); + txq->sw_tx_ring.skbs[idx].skb = NULL; + txq->sw_tx_ring.skbs[idx].flags = 0; +} + +static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) +{ + u32 rc = XMIT_L4_CSUM; + __be16 l3_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return XMIT_PLAIN; + + l3_proto = vlan_get_protocol(skb); + if (l3_proto == htons(ETH_P_IPV6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *ipv6_ext = 1; + + if (skb->encapsulation) { + rc |= XMIT_ENC; + if (skb_is_gso(skb)) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + + if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || + (gso_type & SKB_GSO_GRE_CSUM)) + rc |= XMIT_ENC_GSO_L4_CSUM; + + rc |= XMIT_LSO; + return rc; + } + } + + if (skb_is_gso(skb)) + rc |= XMIT_LSO; + + return rc; +} + +static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, + struct eth_tx_2nd_bd *second_bd, + struct eth_tx_3rd_bd *third_bd) +{ + u8 l4_proto; + u16 bd2_bits1 = 0, bd2_bits2 = 0; + + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) + << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; + + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); + + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) + l4_proto = ipv6_hdr(skb)->nexthdr; + else + l4_proto = ip_hdr(skb)->protocol; + + if (l4_proto == IPPROTO_UDP) + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + + if (third_bd) + third_bd->data.bitfields |= + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); + + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); + second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); +} + +static int map_frag_to_bd(struct qede_tx_queue *txq, + skb_frag_t *frag, struct eth_tx_bd *bd) +{ + dma_addr_t mapping; + + /* Map skb non-linear frag data for DMA */ + mapping = skb_frag_dma_map(txq->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(txq->dev, mapping))) + return -ENOMEM; + + /* Setup the data pointer of the frag data */ + BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); + + return 0; +} + +static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) +{ + if (is_encap_pkt) + return (skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data); + else + return (skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data); +} + +/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) +static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) +{ + int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; + + if (xmit_type & XMIT_LSO) { + int hlen; + + hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); + + /* linear payload would require its own BD */ + if (skb_headlen(skb) > hlen) + allowed_frags--; + } + + return (skb_shinfo(skb)->nr_frags > allowed_frags); +} +#endif + +static inline void qede_update_tx_producer(struct qede_tx_queue *txq) +{ + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. 
+ */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, + struct sw_rx_data *metadata, u16 padding, u16 length) +{ + struct qede_tx_queue *txq = fp->xdp_tx; + u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + struct eth_tx_1st_bd *first_bd; + + if (!qed_chain_get_elem_left(&txq->tx_pbl)) { + txq->stopped_cnt++; + return -ENOMEM; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); + first_bd->data.bitfields |= + (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + first_bd->data.nbds = 1; + + /* We can safely ignore the offset, as it's 0 for XDP */ + BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); + + /* Synchronize the buffer back to device, as program [probably] + * has changed it. + */ + dma_sync_single_for_device(&edev->pdev->dev, + metadata->mapping + padding, + length, PCI_DMA_TODEVICE); + + txq->sw_tx_ring.pages[idx] = metadata->data; + txq->sw_tx_prod++; + + /* Mark the fastpath for future XDP doorbell */ + fp->xdp_xmit = 1; + + return 0; +} + +int qede_txq_has_work(struct qede_tx_queue *txq) +{ + u16 hw_bd_cons; + + /* Tell compiler that consumer and producer can change */ + barrier(); + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) + return 0; + + return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); +} + +static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct eth_tx_1st_bd *bd; + u16 hw_bd_cons; + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + + dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & + NUM_TX_BDS_MAX]); + + txq->sw_tx_cons++; + txq->xmit_pkts++; + } +} + +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + u16 hw_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; + int rc; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + + hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); + barrier(); + + while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + int len = 0; + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", + hw_bd_cons, + qed_chain_get_cons_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + txq->xmit_pkts++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. 
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) from
+ * consumer index to producer index in the bd ring
+ */
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
+
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+
+ rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ /* Since we failed to allocate new buffer
+ * current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+ return -ENOMEM;
+ }
+
+ dma_unmap_page(rxq->dev, curr_cons->mapping,
+ PAGE_SIZE, rxq->data_direction);
+ } else {
+ /* Increment refcount of the page as we don't want
+ * network stack to take the ownership of the page
+ * which can be recycled multiple times by the driver.
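+ *
+ * [Editor's note: assuming rx_buf_seg_size == PAGE_SIZE / 2 for
+ * illustration, each page backs two buffers and the extra reference
+ * keeps the page alive while its first half is owned by the stack:
+ *
+ *   offset 0 ............ handed to the skb, ref held by the stack
+ *   offset PAGE_SIZE/2 .. reposted to HW, ref held by the driver
+ * ]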
+ */ + page_ref_inc(curr_cons->data); + qede_reuse_page(rxq, curr_cons); + } + + return 0; +} + +void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ + u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); + u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = {0}; + + /* Update producers */ + rx_prods.bd_prod = cpu_to_le16(bd_prod); + rx_prods.cqe_prod = cpu_to_le16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (u32 *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + +static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; + enum rss_hash_type htype; + u32 hash = 0; + + htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); + if (htype) { + hash_type = ((htype == RSS_HASH_TYPE_IPV4) || + (htype == RSS_HASH_TYPE_IPV6)) ? + PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; + hash = le32_to_cpu(rss_hash); + } + skb_set_hash(skb, hash, hash_type); +} + +static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) +{ + skb_checksum_none_assert(skb); + + if (csum_flag & QEDE_CSUM_UNNECESSARY) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) { + skb->csum_level = 1; + skb->encapsulation = 1; + } +} + +static inline void qede_skb_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + struct sk_buff *skb, u16 vlan_tag) +{ + if (vlan_tag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&fp->napi, skb); + rxq->rcv_pkts++; +} + +static void qede_set_gro_params(struct qede_dev *edev, + struct sk_buff *skb, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); + + if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & + PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - + cqe->header_len; +} + +static int qede_fill_frag_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + u8 tpa_agg_index, u16 len_on_bd) +{ + struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & + NUM_RX_BDS_MAX]; + struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; + struct sk_buff *skb = tpa_info->skb; + + if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) + goto out; + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, tpa_info->frag_id++, + current_bd->data, current_bd->page_offset, + len_on_bd); + + if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { + /* Incr page ref count to reuse on allocation failure + * so that it doesn't get freed while freeing SKB. 
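+ *
+ * [Editor's sketch of the reference balance (illustrative counts):
+ *
+ *   ref = N     frag attached via skb_fill_page_desc() above
+ *   ref = N+1   page_ref_inc() below, on realloc failure
+ *   ref = N     skb freed later on the aggregation error path
+ *
+ * leaving the driver's own reference intact for recycling.]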
+ */
+ page_ref_inc(current_bd->data);
+ goto out;
+ }
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
+ dma_addr_t mapping = tpa_info->buffer_mapping;
+ struct sw_rx_data *sw_rx_data_cons;
+ struct sw_rx_data *sw_rx_data_prod;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ /* Use pre-allocated replacement buffer - we can't release the agg.
+ * start until it's over and we don't want to risk allocation failing
+ * here, so re-allocate when aggregation will be over.
+ */
+ sw_rx_data_prod->mapping = replace_buf->mapping;
+
+ sw_rx_data_prod->data = replace_buf->data;
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+ rxq->sw_rx_prod++;
+
+ /* move partial skb from cons to pool (don't unmap yet)
+ * save mapping, in case we drop the packet later on.
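+ *
+ * [Editor's sketch of the swap performed here:
+ *
+ *   ring[prod]       <- tpa_info->buffer   (pre-allocated replacement)
+ *   tpa_info->buffer <- ring[cons]         (page under aggregation)
+ *
+ * so the hardware ring stays fully populated even though the
+ * aggregated page remains mapped until the aggregation ends.]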
+ */ + tpa_info->buffer = *sw_rx_data_cons; + mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), + le32_to_cpu(rx_bd_cons->addr.lo)); + + tpa_info->buffer_mapping = mapping; + rxq->sw_rx_cons++; + + /* set tpa state to start only if we are able to allocate skb + * for this aggregation, otherwise mark as error and aggregation will + * be dropped + */ + tpa_info->skb = netdev_alloc_skb(edev->ndev, + le16_to_cpu(cqe->len_on_first_bd)); + if (unlikely(!tpa_info->skb)) { + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + goto cons_buf; + } + + /* Start filling in the aggregation info */ + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); + tpa_info->frag_id = 0; + tpa_info->state = QEDE_AGG_STATE_START; + + /* Store some information from first CQE */ + tpa_info->start_cqe_placement_offset = cqe->placement_offset; + tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); + if ((le16_to_cpu(cqe->pars_flags.flags) >> + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + else + tpa_info->vlan_tag = 0; + + qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); + + /* This is needed in order to enable forwarding support */ + qede_set_gro_params(edev, tpa_info->skb, cqe); + +cons_buf: /* We still need to handle bd_len_list to consume buffers */ + if (likely(cqe->ext_bd_len_list[0])) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->ext_bd_len_list[0])); + + if (unlikely(cqe->ext_bd_len_list[1])) { + DP_ERR(edev, + "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + tpa_info->state = QEDE_AGG_STATE_ERROR; + } +} + +#ifdef CONFIG_INET +static void qede_gro_ip_csum(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + + tcp_gro_complete(skb); +} + +static void qede_gro_ipv6_csum(struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); + tcp_gro_complete(skb); +} +#endif + +static void qede_gro_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ + /* FW can send a single MTU sized packet from gro flow + * due to aggregation timeout/last segment etc. which + * is not expected to be a gro packet. If a skb has zero + * frags then simply push it in the stack as non gso skb. 
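+ *
+ * [Editor's note: gso_size is what the stack uses to re-segment a
+ * forwarded GRO skb; a single-MTU frame has nothing to re-segment,
+ * hence the check below (mirrored here for emphasis):
+ *
+ *   if (!skb->data_len)   // no frags => not an aggregate
+ *           clear gso_type/gso_size and hand over a plain skb
+ * ]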
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_reset_network_header(skb);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+#endif
+
+send_skb:
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
+ qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA end with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+ "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ memcpy(skb->data,
+ page_address(tpa_info->buffer.data) +
+ tpa_info->start_cqe_placement_offset +
+ tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ return;
+err:
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+
ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct bpf_prog *prog,
+ struct sw_rx_data *bd,
+ struct eth_fast_path_rx_reg_cqe *cqe)
+{
+ u16 len = le16_to_cpu(cqe->len_on_first_bd);
+ struct xdp_buff xdp;
+ enum xdp_action act;
+
+ xdp.data = page_address(bd->data) + cqe->placement_offset;
+ xdp.data_end = xdp.data + len;
+
+ /* Queues always have a full reset currently, so for the time
+ * being until there's atomic program replace just mark read
+ * side for map helpers.
+ */
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
+
+ if (act == XDP_PASS)
+ return true;
+
+ /* Count number of packets not to be passed to stack */
+ rxq->xdp_no_pass++;
+
+ switch (act) {
+ case XDP_TX:
+ /* We need the replacement buffer before transmit. */
+ if (qede_alloc_rx_buffer(rxq, true)) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+ trace_xdp_exception(edev->ndev, prog, act);
+ return false;
+ }
+
+ /* Now if there's a transmission problem, we'd still have to
+ * throw current buffer, as replacement was already allocated.
+ */
+ if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(bd->data);
+ trace_xdp_exception(edev->ndev, prog, act);
+ }
+
+ /* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ return false;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(edev->ndev, prog, act);
+ case XDP_DROP:
+ qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+ }
+
+ return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len,
+ u16 pad)
+{
+ unsigned int offset = bd->page_offset;
+ struct skb_frag_struct *frag;
+ struct page *page = bd->data;
+ unsigned int pull_len;
+ struct sk_buff *skb;
+ unsigned char *va;
+
+ /* Allocate a new SKB with a sufficiently large header len */
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Copy data into SKB - if it's small, we can simply copy it and
+ * re-use the already allocated & mapped memory.
+ */
+ if (len + pad <= edev->rx_copybreak) {
+ memcpy(skb_put(skb, len),
+ page_address(page) + pad + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, pad + offset, len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ /* Correct the skb & frag sizes offset after the pull */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+ * that it doesn't get freed while freeing SKB [as it's
+ * already mapped there].
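+ *
+ * [Editor's note: contrast with the copybreak path earlier in this
+ * function, which avoids the refcount bookkeeping entirely for small
+ * frames (threshold value is configuration-dependent):
+ *
+ *   if (len + pad <= edev->rx_copybreak) {
+ *           memcpy(skb_put(skb, len),
+ *                  page_address(page) + pad + offset, len);
+ *           qede_reuse_page(rxq, bd);  // page goes straight back to HW
+ *   }
+ * ]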
+ */ + page_ref_inc(page); + dev_kfree_skb_any(skb); + return NULL; + } + +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + return skb; +} + +static int qede_rx_build_jumbo(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sk_buff *skb, + struct eth_fast_path_rx_reg_cqe *cqe, + u16 first_bd_len) +{ + u16 pkt_len = le16_to_cpu(cqe->pkt_len); + struct sw_rx_data *bd; + u16 bd_cons_idx; + u8 num_frags; + + pkt_len -= first_bd_len; + + /* We've already used one BD for the SKB. Now take care of the rest */ + for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : + pkt_len; + + if (unlikely(!cur_size)) { + DP_ERR(edev, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + goto out; + } + + /* We need a replacement buffer for each BD */ + if (unlikely(qede_alloc_rx_buffer(rxq, true))) + goto out; + + /* Now that we've allocated the replacement buffer, + * we can safely consume the next BD and map it to the SKB. + */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + qede_rx_bd_ring_consume(rxq); + + dma_unmap_page(rxq->dev, bd->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + bd->data, 0, cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (unlikely(pkt_len)) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + +out: + return num_frags; +} + +static int qede_rx_process_tpa_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq, + union eth_rx_cqe *cqe, + enum eth_rx_cqe_type type) +{ + switch (type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); + return 0; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); + return 0; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); + return 1; + default: + return 0; + } +} + +static int qede_rx_process_cqe(struct qede_dev *edev, + struct qede_fastpath *fp, + struct qede_rx_queue *rxq) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); + struct eth_fast_path_rx_reg_cqe *fp_cqe; + u16 len, pad, bd_cons_idx, parse_flag; + enum eth_rx_cqe_type cqe_type; + union eth_rx_cqe *cqe; + struct sw_rx_data *bd; + struct sk_buff *skb; + __le16 flags; + u8 csum_flag; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + /* Process an unlikely slowpath event */ + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + struct eth_slow_path_rx_cqe *sp_cqe; + + sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; + edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); + return 0; + } + + /* Handle TPA cqes */ + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) + return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); + + /* Get the data from the SW ring; Consume it only after it's evident + * we wouldn't recycle it. 
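+ *
+ * [Editor's sketch of the per-CQE flow implemented below:
+ *
+ *   1. peek at sw_rx_ring[cons]            (no consume yet)
+ *   2. run the XDP program, if attached    -> BD handled there
+ *   3. checksum sanity                     -> error: recycle bd_num BDs
+ *   4. build the skb                       -> success consumes first BD
+ * ]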
+ */ + bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + bd = &rxq->sw_rx_ring[bd_cons_idx]; + + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + pad = fp_cqe->placement_offset; + + /* Run eBPF program if one is attached */ + if (xdp_prog) + if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) + return 1; + + /* If this is an error packet then drop it */ + flags = cqe->fast_path_regular.pars_flags.flags; + parse_flag = le16_to_cpu(flags); + + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + rxq->rx_ip_frags++; + } else { + DP_NOTICE(edev, + "CQE has error, flags = %x, dropping incoming packet\n", + parse_flag); + rxq->rx_hw_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + } + + /* Basic validation passed; Need to prepare an SKB. This would also + * guarantee to finally consume the first BD upon success. + */ + skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + if (!skb) { + rxq->rx_alloc_errors++; + qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); + return 0; + } + + /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed + * by a single cqe. + */ + if (fp_cqe->bd_num > 1) { + u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, + fp_cqe, len); + + if (unlikely(unmapped_frags > 0)) { + qede_recycle_rx_bd_ring(rxq, unmapped_frags); + dev_kfree_skb_any(skb); + return 0; + } + } + + /* The SKB contains all the data. Now prepare meta-magic */ + skb->protocol = eth_type_trans(skb, edev->ndev); + qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); + qede_set_skb_csum(skb, csum_flag); + skb_record_rx_queue(skb, rxq->rxq_id); + + /* SKB is prepared - pass it to stack */ + qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); + + return 1; +} + +static int qede_rx_int(struct qede_fastpath *fp, int budget) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_dev *edev = fp->edev; + u16 hw_comp_cons, sw_comp_cons; + int work_done = 0; + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD in the while-loop before reading hw_comp_cons. If the CQE is + * read before it is written by FW, then FW writes CQE and SB, and then + * the CPU reads the hw_comp_cons, it will use an old CQE. + */ + rmb(); + + /* Loop to complete all indicated BDs */ + while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { + qede_rx_process_cqe(edev, fp, rxq); + qed_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + work_done++; + } + + /* Allocate replacement buffers */ + while (rxq->num_rx_buffers - rxq->filled_buffers) + if (qede_alloc_rx_buffer(rxq, false)) + break; + + /* Update producers */ + qede_update_rx_prod(edev, rxq); + + return work_done; +} + +static bool qede_poll_is_more_work(struct qede_fastpath *fp) +{ + qed_sb_update_sb_idx(fp->sb_info); + + /* *_has_*_work() reads the status block, thus we need to ensure that + * status block indices have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that we won't write the + * "newer" value of the status block to HW (if there was a DMA right + * after qede_has_rx_work and if there is no rmb, the memory reading + * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). 
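+ *
+ * [Editor's aside - condensed, the order enforced here is:
+ *
+ *   qed_sb_update_sb_idx(fp->sb_info);  // 1. read fresh SB indices
+ *   rmb();                              // 2. complete those reads...
+ *   qede_has_rx_work() etc.             // 3. ...before the ring checks
+ *
+ * Returning to the postponed-read scenario described above:]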
+ * In this case there will never be another interrupt until there is + * another update of the status block, while there is still unhandled + * work. + */ + rmb(); + + if (likely(fp->type & QEDE_FASTPATH_RX)) + if (qede_has_rx_work(fp->rxq)) + return true; + + if (fp->type & QEDE_FASTPATH_XDP) + if (qede_txq_has_work(fp->xdp_tx)) + return true; + + if (likely(fp->type & QEDE_FASTPATH_TX)) + if (qede_txq_has_work(fp->txq)) + return true; + + return false; +} + +/********************* + * NDO & API related * + *********************/ +int qede_poll(struct napi_struct *napi, int budget) +{ + struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, + napi); + struct qede_dev *edev = fp->edev; + int rx_work_done = 0; + + if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) + qede_tx_int(edev, fp->txq); + + if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) + qede_xdp_tx_int(edev, fp->xdp_tx); + + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? + qede_rx_int(fp, budget) : 0; + if (rx_work_done < budget) { + if (!qede_poll_is_more_work(fp)) { + napi_complete_done(napi, rx_work_done); + + /* Update and reenable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); + } else { + rx_work_done = budget; + } + } + + if (fp->xdp_xmit) { + u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); + + fp->xdp_xmit = 0; + fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); + qede_update_tx_producer(fp->xdp_tx); + } + + return rx_work_done; +} + +irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) +{ + struct qede_fastpath *fp = fp_cookie; + + qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); + + napi_schedule_irqoff(&fp->napi); + return IRQ_HANDLED; +} + +/* Main transmit function */ +netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + struct netdev_queue *netdev_txq; + struct qede_tx_queue *txq; + struct eth_tx_1st_bd *first_bd; + struct eth_tx_2nd_bd *second_bd = NULL; + struct eth_tx_3rd_bd *third_bd = NULL; + struct eth_tx_bd *tx_data_bd = NULL; + u16 txq_index; + u8 nbd = 0; + dma_addr_t mapping; + int rc, frag_idx = 0, ipv6_ext = 0; + u8 xmit_type; + u16 idx; + u16 hlen; + bool data_split = false; + + /* Get tx-queue context and netdev index */ + txq_index = skb_get_queue_mapping(skb); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); + txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; + netdev_txq = netdev_get_tx_queue(ndev, txq_index); + + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); + + xmit_type = qede_xmit_type(skb, &ipv6_ext); + +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) + if (qede_pkt_req_lin(skb, xmit_type)) { + if (skb_linearize(skb)) { + DP_NOTICE(edev, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring.skbs[idx].skb = skb; + first_bd = (struct eth_tx_1st_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + first_bd->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(txq->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(txq->dev, mapping))) { + DP_NOTICE(edev, "SKB mapping failed\n"); + 
qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + nbd++; + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); + + /* In case there is IPv6 with extension headers or LSO we need 2nd and + * 3rd BDs. + */ + if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { + second_bd = (struct eth_tx_2nd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(second_bd, 0, sizeof(*second_bd)); + + nbd++; + third_bd = (struct eth_tx_3rd_bd *) + qed_chain_produce(&txq->tx_pbl); + memset(third_bd, 0, sizeof(*third_bd)); + + nbd++; + /* We need to fill in additional data in second_bd... */ + tx_data_bd = (struct eth_tx_bd *)second_bd; + } + + if (skb_vlan_tag_present(skb)) { + first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Fill the parsing flags & params according to the requested offload */ + if (xmit_type & XMIT_L4_CSUM) { + /* We don't re-calculate IP checksum as it is already done by + * the upper stack + */ + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + } + + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + + /* If the packet is IPv6 with extension header, indicate that + * to FW and pass few params, since the device cracker doesn't + * support parsing IPv6 with extension header/s. + */ + if (unlikely(ipv6_ext)) + qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); + } + + if (xmit_type & XMIT_LSO) { + first_bd->data.bd_flags.bitfields |= + (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); + third_bd->data.lso_mss = + cpu_to_le16(skb_shinfo(skb)->gso_size); + + if (unlikely(xmit_type & XMIT_ENC)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + + if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { + u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + + first_bd->data.bd_flags.bitfields |= 1 << tmp; + } + hlen = qede_get_skb_hlen(skb, true); + } else { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, false); + } + + /* @@@TBD - if will not be removed need to check */ + third_bd->data.bitfields |= + cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + + /* Make life easier for FW guys who can't deal with header and + * data on same BD. If we need to split, use the second bd... 
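+ *
+ * [Editor's note: the split in numbers - with illustrative values
+ * hlen = 66 (Ethernet + IP + TCP headers) and skb_headlen(skb) = 200:
+ *
+ *   first_bd:   bytes [0, 66)    headers only, nbytes = hlen
+ *   tx_data_bd: bytes [66, 200)  linear payload at mapping + hlen
+ *
+ * QEDE_TSO_SPLIT_BD then tells the completion path that these two BDs
+ * share a single DMA unmap.]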
+ */ + if (unlikely(skb_headlen(skb) > hlen)) { + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "TSO split header size is %d (%x:%x)\n", + first_bd->nbytes, first_bd->addr.hi, + first_bd->addr.lo); + + mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), + le32_to_cpu(first_bd->addr.lo)) + + hlen; + + BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, + le16_to_cpu(first_bd->nbytes) - + hlen); + + /* this marks the BD as one that has no + * individual mapping + */ + txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; + + first_bd->nbytes = cpu_to_le16(hlen); + + tx_data_bd = (struct eth_tx_bd *)third_bd; + data_split = true; + } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + } + + /* Handle fragmented skb */ + /* special handle for frags inside 2nd and 3rd bds.. */ + while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + + if (tx_data_bd == (struct eth_tx_bd *)second_bd) + tx_data_bd = (struct eth_tx_bd *)third_bd; + else + tx_data_bd = NULL; + + frag_idx++; + } + + /* map last frags into 4th, 5th .... */ + for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { + tx_data_bd = (struct eth_tx_bd *) + qed_chain_produce(&txq->tx_pbl); + + memset(tx_data_bd, 0, sizeof(*tx_data_bd)); + + rc = map_frag_to_bd(txq, + &skb_shinfo(skb)->frags[frag_idx], + tx_data_bd); + if (rc) { + qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + } + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = nbd; + + netdev_tx_sent_queue(netdev_txq, skb->len); + + skb_tx_timestamp(skb); + + /* Advance packet producer only before sending the packet since mapping + * of pages may fail. + */ + txq->sw_tx_prod++; + + /* 'next page' entries are counted in the producer value */ + txq->tx_db.data.bd_prod = + cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + + if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + qede_update_tx_producer(txq); + + if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) + < (MAX_SKB_FRAGS + 1))) { + if (skb->xmit_more) + qede_update_tx_producer(txq); + + netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Stop queue was called\n"); + /* paired memory barrier is in qede_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons + */ + smp_mb(); + + if ((qed_chain_get_elem_left(&txq->tx_pbl) >= + (MAX_SKB_FRAGS + 1)) && + (edev->state == QEDE_STATE_OPEN)) { + netif_tx_wake_queue(netdev_txq); + DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, + "Wake queue was called\n"); + } + } + + return NETDEV_TX_OK; +} + +/* 8B udp header + 8B base tunnel header + 32B option length */ +#define QEDE_MAX_TUN_HDR_LEN 48 + +netdev_features_t qede_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation) { + u8 l4_proto = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + /* Disable offloads for geneve tunnels, as HW can't parse + * the geneve header which has option length greater than 32B. 
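+ *
+ * [Editor's note: the 48-byte budget decomposes as 8 (UDP header) +
+ * 8 (base tunnel header) + 32 (maximum option length the HW parser
+ * handles), per the QEDE_MAX_TUN_HDR_LEN definition above. The check
+ * measures the actual tunnel header as
+ *
+ *   skb_inner_mac_header(skb) - skb_transport_header(skb)
+ *
+ * i.e. everything between the outer L4 header and the inner Ethernet
+ * header; anything larger falls back to software checksum/GSO.]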
+ */ + if ((l4_proto == IPPROTO_UDP) && + ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } + + return features; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index aecdd1c5c0ea..40a76a1d5973 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1,11 +1,34 @@ /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -* -* This software is available under the terms of the GNU General Public License -* (GPL) Version 2, available from the file COPYING in the main directory of -* this source tree. -*/ - + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ #include <linux/module.h> #include <linux/pci.h> #include <linux/version.h> @@ -36,6 +59,7 @@ #include <linux/random.h> #include <net/ip6_checksum.h> #include <linux/bitops.h> +#include <linux/vmalloc.h> #include <linux/qed/qede_roce.h> #include "qede.h" @@ -154,8 +178,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_params; int rc; + vport_params = vzalloc(sizeof(*vport_params)); + if (!vport_params) + return -ENOMEM; DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); @@ -163,15 +191,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { - struct qed_update_vport_params params; - - memset(¶ms, 0, sizeof(params)); - params.vport_id = 0; - params.update_tx_switching_flg = 1; - params.tx_switching_flg = num_vfs_param ? 1 : 0; - edev->ops->vport_update(edev->cdev, ¶ms); + vport_params->vport_id = 0; + vport_params->update_tx_switching_flg = 1; + vport_params->tx_switching_flg = num_vfs_param ? 
1 : 0; + edev->ops->vport_update(edev->cdev, vport_params); } + vfree(vport_params); return rc; } #endif @@ -187,18 +213,6 @@ static struct pci_driver qede_pci_driver = { #endif }; -static void qede_force_mac(void *dev, u8 *mac, bool forced) -{ - struct qede_dev *edev = dev; - - /* MAC hints take effect only if we haven't set one already */ - if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) - return; - - ether_addr_copy(edev->ndev->dev_addr, mac); - ether_addr_copy(edev->primary_mac, mac); -} - static struct qed_eth_cb_ops qede_ll_ops = { { .link_update = qede_link_update, @@ -294,1643 +308,8 @@ static void __exit qede_cleanup(void) module_init(qede_init); module_exit(qede_cleanup); -/* ------------------------------------------------------------------------- - * START OF FAST-PATH - * ------------------------------------------------------------------------- - */ - -/* Unmap the data and free skb */ -static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, int *len) -{ - u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_bd *tx_data_bd; - int bds_consumed = 0; - int nbds; - bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; - int i, split_bd_len = 0; - - if (unlikely(!skb)) { - DP_ERR(edev, - "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", - idx, txq->sw_tx_cons, txq->sw_tx_prod); - return -1; - } - - *len = skb->len; - - first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - bds_consumed++; - - nbds = first_bd->data.nbds; - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - bds_consumed++; - } - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_consume(&txq->tx_pbl); - dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - - while (bds_consumed++ < nbds) - qed_chain_consume(&txq->tx_pbl); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; - - return 0; -} - -/* Unmap the data and free skb when mapping failed during start_xmit */ -static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, - struct eth_tx_1st_bd *first_bd, - int nbd, bool data_split) -{ - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; - struct eth_tx_bd *tx_data_bd; - int i, split_bd_len = 0; - - /* Return prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - if (data_split) { - struct eth_tx_bd *split = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - split_bd_len = BD_UNMAP_LEN(split); - nbd--; - } - - dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), - BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); - - /* Unmap the data of the skb frags */ - for (i = 0; i < nbd; i++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - if (tx_data_bd->nbytes) - dma_unmap_page(txq->dev, - BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - } - 
- /* Return again prod to its position before this skb was handled */ - qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); - - /* Free skb */ - dev_kfree_skb_any(skb); - txq->sw_tx_ring.skbs[idx].skb = NULL; - txq->sw_tx_ring.skbs[idx].flags = 0; -} - -static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) -{ - u32 rc = XMIT_L4_CSUM; - __be16 l3_proto; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return XMIT_PLAIN; - - l3_proto = vlan_get_protocol(skb); - if (l3_proto == htons(ETH_P_IPV6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - *ipv6_ext = 1; - - if (skb->encapsulation) { - rc |= XMIT_ENC; - if (skb_is_gso(skb)) { - unsigned short gso_type = skb_shinfo(skb)->gso_type; - - if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || - (gso_type & SKB_GSO_GRE_CSUM)) - rc |= XMIT_ENC_GSO_L4_CSUM; - - rc |= XMIT_LSO; - return rc; - } - } - - if (skb_is_gso(skb)) - rc |= XMIT_LSO; - - return rc; -} - -static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, - struct eth_tx_2nd_bd *second_bd, - struct eth_tx_3rd_bd *third_bd) -{ - u8 l4_proto; - u16 bd2_bits1 = 0, bd2_bits2 = 0; - - bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - - bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & - ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) - << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - - bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << - ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); - - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) - l4_proto = ipv6_hdr(skb)->nexthdr; - else - l4_proto = ip_hdr(skb)->protocol; - - if (l4_proto == IPPROTO_UDP) - bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - - if (third_bd) - third_bd->data.bitfields |= - cpu_to_le16(((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - - second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); - second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); -} - -static int map_frag_to_bd(struct qede_tx_queue *txq, - skb_frag_t *frag, struct eth_tx_bd *bd) -{ - dma_addr_t mapping; - - /* Map skb non-linear frag data for DMA */ - mapping = skb_frag_dma_map(txq->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) - return -ENOMEM; - - /* Setup the data pointer of the frag data */ - BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); - - return 0; -} - -static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) -{ - if (is_encap_pkt) - return (skb_inner_transport_header(skb) + - inner_tcp_hdrlen(skb) - skb->data); - else - return (skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data); -} - -/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) -static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) -{ - int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; - - if (xmit_type & XMIT_LSO) { - int hlen; - - hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); - - /* linear payload would require its own BD */ - if (skb_headlen(skb) > hlen) - allowed_frags--; - } - - return (skb_shinfo(skb)->nr_frags > allowed_frags); -} -#endif - -static inline void qede_update_tx_producer(struct qede_tx_queue *txq) -{ - /* wmb makes sure that the BDs data is updated before updating the - * producer, otherwise FW may read old data from the BDs. 
- */ - wmb(); - barrier(); - writel(txq->tx_db.raw, txq->doorbell_addr); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, - struct sw_rx_data *metadata, u16 padding, u16 length) -{ - struct qede_tx_queue *txq = fp->xdp_tx; - u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - struct eth_tx_1st_bd *first_bd; - - if (!qed_chain_get_elem_left(&txq->tx_pbl)) { - txq->stopped_cnt++; - return -ENOMEM; - } - - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); - - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); - first_bd->data.bitfields |= - (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - first_bd->data.nbds = 1; - - /* We can safely ignore the offset, as it's 0 for XDP */ - BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); - - /* Synchronize the buffer back to device, as program [probably] - * has changed it. - */ - dma_sync_single_for_device(&edev->pdev->dev, - metadata->mapping + padding, - length, PCI_DMA_TODEVICE); - - txq->sw_tx_ring.pages[idx] = metadata->data; - txq->sw_tx_prod++; - - /* Mark the fastpath for future XDP doorbell */ - fp->xdp_xmit = 1; - - return 0; -} - -/* Main transmit function */ -static netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct netdev_queue *netdev_txq; - struct qede_tx_queue *txq; - struct eth_tx_1st_bd *first_bd; - struct eth_tx_2nd_bd *second_bd = NULL; - struct eth_tx_3rd_bd *third_bd = NULL; - struct eth_tx_bd *tx_data_bd = NULL; - u16 txq_index; - u8 nbd = 0; - dma_addr_t mapping; - int rc, frag_idx = 0, ipv6_ext = 0; - u8 xmit_type; - u16 idx; - u16 hlen; - bool data_split = false; - - /* Get tx-queue context and netdev index */ - txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); - txq = edev->fp_array[edev->fp_num_rx + txq_index].txq; - netdev_txq = netdev_get_tx_queue(ndev, txq_index); - - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); - - xmit_type = qede_xmit_type(skb, &ipv6_ext); - -#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) - if (qede_pkt_req_lin(skb, xmit_type)) { - if (skb_linearize(skb)) { - DP_NOTICE(edev, - "SKB linearization failed - silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } -#endif - - /* Fill the entry in the SW ring and the BDs in the FW ring */ - idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; - txq->sw_tx_ring.skbs[idx].skb = skb; - first_bd = (struct eth_tx_1st_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; - - /* Map skb linear data for DMA and set in the first BD */ - mapping = dma_map_single(txq->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(txq->dev, mapping))) { - DP_NOTICE(edev, "SKB mapping failed\n"); - qede_free_failed_tx_pkt(txq, first_bd, 0, false); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - nbd++; - 
BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); - - /* In case there is IPv6 with extension headers or LSO we need 2nd and - * 3rd BDs. - */ - if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { - second_bd = (struct eth_tx_2nd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(second_bd, 0, sizeof(*second_bd)); - - nbd++; - third_bd = (struct eth_tx_3rd_bd *) - qed_chain_produce(&txq->tx_pbl); - memset(third_bd, 0, sizeof(*third_bd)); - - nbd++; - /* We need to fill in additional data in second_bd... */ - tx_data_bd = (struct eth_tx_bd *)second_bd; - } - - if (skb_vlan_tag_present(skb)) { - first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; - } - - /* Fill the parsing flags & params according to the requested offload */ - if (xmit_type & XMIT_L4_CSUM) { - /* We don't re-calculate IP checksum as it is already done by - * the upper stack - */ - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - first_bd->data.bitfields |= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - } - - /* Legacy FW had flipped behavior in regard to this bit - - * I.e., needed to set to prevent FW from touching encapsulated - * packets when it didn't need to. - */ - if (unlikely(txq->is_legacy)) - first_bd->data.bitfields ^= - 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; - - /* If the packet is IPv6 with extension header, indicate that - * to FW and pass few params, since the device cracker doesn't - * support parsing IPv6 with extension header/s. - */ - if (unlikely(ipv6_ext)) - qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); - } - - if (xmit_type & XMIT_LSO) { - first_bd->data.bd_flags.bitfields |= - (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); - third_bd->data.lso_mss = - cpu_to_le16(skb_shinfo(skb)->gso_size); - - if (unlikely(xmit_type & XMIT_ENC)) { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; - - if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { - u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; - - first_bd->data.bd_flags.bitfields |= 1 << tmp; - } - hlen = qede_get_skb_hlen(skb, true); - } else { - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - hlen = qede_get_skb_hlen(skb, false); - } - - /* @@@TBD - if will not be removed need to check */ - third_bd->data.bitfields |= - cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); - - /* Make life easier for FW guys who can't deal with header and - * data on same BD. If we need to split, use the second bd... - */ - if (unlikely(skb_headlen(skb) > hlen)) { - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "TSO split header size is %d (%x:%x)\n", - first_bd->nbytes, first_bd->addr.hi, - first_bd->addr.lo); - - mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), - le32_to_cpu(first_bd->addr.lo)) + - hlen; - - BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, - le16_to_cpu(first_bd->nbytes) - - hlen); - - /* this marks the BD as one that has no - * individual mapping - */ - txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; - - first_bd->nbytes = cpu_to_le16(hlen); - - tx_data_bd = (struct eth_tx_bd *)third_bd; - data_split = true; - } - } else { - first_bd->data.bitfields |= - (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << - ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - } - - /* Handle fragmented skb */ - /* special handle for frags inside 2nd and 3rd bds.. 
*/ - while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - - if (tx_data_bd == (struct eth_tx_bd *)second_bd) - tx_data_bd = (struct eth_tx_bd *)third_bd; - else - tx_data_bd = NULL; - - frag_idx++; - } - - /* map last frags into 4th, 5th .... */ - for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { - tx_data_bd = (struct eth_tx_bd *) - qed_chain_produce(&txq->tx_pbl); - - memset(tx_data_bd, 0, sizeof(*tx_data_bd)); - - rc = map_frag_to_bd(txq, - &skb_shinfo(skb)->frags[frag_idx], - tx_data_bd); - if (rc) { - qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); - qede_update_tx_producer(txq); - return NETDEV_TX_OK; - } - } - - /* update the first BD with the actual num BDs */ - first_bd->data.nbds = nbd; - - netdev_tx_sent_queue(netdev_txq, skb->len); - - skb_tx_timestamp(skb); - - /* Advance packet producer only before sending the packet since mapping - * of pages may fail. - */ - txq->sw_tx_prod++; - - /* 'next page' entries are counted in the producer value */ - txq->tx_db.data.bd_prod = - cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - - if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) - qede_update_tx_producer(txq); - - if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) - < (MAX_SKB_FRAGS + 1))) { - if (skb->xmit_more) - qede_update_tx_producer(txq); - - netif_tx_stop_queue(netdev_txq); - txq->stopped_cnt++; - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Stop queue was called\n"); - /* paired memory barrier is in qede_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons - */ - smp_mb(); - - if (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1) && - (edev->state == QEDE_STATE_OPEN)) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, - "Wake queue was called\n"); - } - } - - return NETDEV_TX_OK; -} - -int qede_txq_has_work(struct qede_tx_queue *txq) -{ - u16 hw_bd_cons; - - /* Tell compiler that consumer and producer can change */ - barrier(); - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) - return 0; - - return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); -} - -static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct eth_tx_1st_bd *bd; - u16 hw_bd_cons; - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); - - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & - NUM_TX_BDS_MAX]); - - txq->sw_tx_cons++; - txq->xmit_pkts++; - } -} - -static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) -{ - struct netdev_queue *netdev_txq; - u16 hw_bd_cons; - unsigned int pkts_compl = 0, bytes_compl = 0; - int rc; - - netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); - - hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); - barrier(); - - while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - int len = 0; - - rc = qede_free_tx_pkt(edev, txq, &len); - if (rc) { - DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", - hw_bd_cons, - qed_chain_get_cons_idx(&txq->tx_pbl)); - break; - } - - bytes_compl += len; - pkts_compl++; - 
txq->sw_tx_cons++; - txq->xmit_pkts++; - } - - netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); - - /* Need to make the tx_bd_cons update visible to start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that - * start_xmit() will miss it and cause the queue to be stopped - * forever. - * On the other hand we need an rmb() here to ensure the proper - * ordering of bit testing in the following - * netif_tx_queue_stopped(txq) call. - */ - smp_mb(); - - if (unlikely(netif_tx_queue_stopped(netdev_txq))) { - /* Taking tx_lock is needed to prevent reenabling the queue - * while it's empty. This could have happen if rx_action() gets - * suspended in qede_tx_int() after the condition before - * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): - * - * stops the queue->sees fresh tx_bd_cons->releases the queue-> - * sends some packets consuming the whole queue again-> - * stops the queue - */ - - __netif_tx_lock(netdev_txq, smp_processor_id()); - - if ((netif_tx_queue_stopped(netdev_txq)) && - (edev->state == QEDE_STATE_OPEN) && - (qed_chain_get_elem_left(&txq->tx_pbl) - >= (MAX_SKB_FRAGS + 1))) { - netif_tx_wake_queue(netdev_txq); - DP_VERBOSE(edev, NETIF_MSG_TX_DONE, - "Wake queue was called\n"); - } - - __netif_tx_unlock(netdev_txq); - } - - return 0; -} - -bool qede_has_rx_work(struct qede_rx_queue *rxq) -{ - u16 hw_comp_cons, sw_comp_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - return hw_comp_cons != sw_comp_cons; -} - -static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) -{ - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; -} - -/* This function reuses the buffer(from an offset) from - * consumer index to producer index in the bd ring - */ -static inline void qede_reuse_page(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *curr_prod; - dma_addr_t new_mapping; - - curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - *curr_prod = *curr_cons; - - new_mapping = curr_prod->mapping + curr_prod->page_offset; - - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - - rxq->sw_rx_prod++; - curr_cons->data = NULL; -} - -/* In case of allocation failures reuse buffers - * from consumer index to produce buffers for firmware - */ -void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) -{ - struct sw_rx_data *curr_cons; - - for (; count > 0; count--) { - curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - qede_reuse_page(rxq, curr_cons); - qede_rx_bd_ring_consume(rxq); - } -} - -static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) -{ - struct sw_rx_data *sw_rx_data; - struct eth_rx_bd *rx_bd; - dma_addr_t mapping; - struct page *data; - - data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!data)) - return -ENOMEM; - - /* Map the entire page as it would be used - * for multiple RX buffer segment size mapping. 
- */ - mapping = dma_map_page(rxq->dev, data, 0, - PAGE_SIZE, rxq->data_direction); - if (unlikely(dma_mapping_error(rxq->dev, mapping))) { - __free_page(data); - return -ENOMEM; - } - - sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; - sw_rx_data->page_offset = 0; - sw_rx_data->data = data; - sw_rx_data->mapping = mapping; - - /* Advance PROD and get BD pointer */ - rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); - WARN_ON(!rx_bd); - rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - - rxq->sw_rx_prod++; - - return 0; -} - -static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, - struct sw_rx_data *curr_cons) -{ - /* Move to the next segment in the page */ - curr_cons->page_offset += rxq->rx_buf_seg_size; - - if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(rxq))) { - /* Since we failed to allocate new buffer - * current buffer can be used again. - */ - curr_cons->page_offset -= rxq->rx_buf_seg_size; - - return -ENOMEM; - } - - dma_unmap_page(rxq->dev, curr_cons->mapping, - PAGE_SIZE, rxq->data_direction); - } else { - /* Increment refcount of the page as we don't want - * network stack to take the ownership of the page - * which can be recycled multiple times by the driver. - */ - page_ref_inc(curr_cons->data); - qede_reuse_page(rxq, curr_cons); - } - - return 0; -} - -void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) -{ - u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); - u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); - struct eth_rx_prod_data rx_prods = {0}; - - /* Update producers */ - rx_prods.bd_prod = cpu_to_le16(bd_prod); - rx_prods.cqe_prod = cpu_to_le16(cqe_prod); - - /* Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - */ - wmb(); - - internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), - (u32 *)&rx_prods); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the napi lock is released and another qede_poll is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. - */ - mmiowb(); -} - -static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) -{ - enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; - enum rss_hash_type htype; - u32 hash = 0; - - htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); - if (htype) { - hash_type = ((htype == RSS_HASH_TYPE_IPV4) || - (htype == RSS_HASH_TYPE_IPV6)) ? 
-			     PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
-		hash = le32_to_cpu(rss_hash);
-	}
-	skb_set_hash(skb, hash, hash_type);
-}
-
-static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
-{
-	skb_checksum_none_assert(skb);
-
-	if (csum_flag & QEDE_CSUM_UNNECESSARY)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
-		skb->csum_level = 1;
-}
-
-static inline void qede_skb_receive(struct qede_dev *edev,
-				    struct qede_fastpath *fp,
-				    struct qede_rx_queue *rxq,
-				    struct sk_buff *skb, u16 vlan_tag)
-{
-	if (vlan_tag)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-	napi_gro_receive(&fp->napi, skb);
-	fp->rxq->rcv_pkts++;
-}
-
-static void qede_set_gro_params(struct qede_dev *edev,
-				struct sk_buff *skb,
-				struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
-
-	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
-	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-	else
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-
-	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
-				    cqe->header_len;
-}
-
-static int qede_fill_frag_skb(struct qede_dev *edev,
-			      struct qede_rx_queue *rxq,
-			      u8 tpa_agg_index, u16 len_on_bd)
-{
-	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
-							 NUM_RX_BDS_MAX];
-	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
-	struct sk_buff *skb = tpa_info->skb;
-
-	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
-		goto out;
-
-	/* Add one frag and update the appropriate fields in the skb */
-	skb_fill_page_desc(skb, tpa_info->frag_id++,
-			   current_bd->data, current_bd->page_offset,
-			   len_on_bd);
-
-	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
-		/* Incr page ref count to reuse on allocation failure
-		 * so that it doesn't get freed while freeing SKB.
-		 */
-		page_ref_inc(current_bd->data);
-		goto out;
-	}
-
-	qed_chain_consume(&rxq->rx_bd_ring);
-	rxq->sw_rx_cons++;
-
-	skb->data_len += len_on_bd;
-	skb->truesize += rxq->rx_buf_seg_size;
-	skb->len += len_on_bd;
-
-	return 0;
-
-out:
-	tpa_info->state = QEDE_AGG_STATE_ERROR;
-	qede_recycle_rx_bd_ring(rxq, 1);
-
-	return -ENOMEM;
-}
-
-static void qede_tpa_start(struct qede_dev *edev,
-			   struct qede_rx_queue *rxq,
-			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
-	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-	struct sw_rx_data *replace_buf = &tpa_info->buffer;
-	dma_addr_t mapping = tpa_info->buffer_mapping;
-	struct sw_rx_data *sw_rx_data_cons;
-	struct sw_rx_data *sw_rx_data_prod;
-
-	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-
-	/* Use pre-allocated replacement buffer - we can't release the agg.
-	 * start until it's over and we don't want to risk allocation failing
-	 * here, so re-allocate when aggregation will be over.
-	 */
-	sw_rx_data_prod->mapping = replace_buf->mapping;
-
-	sw_rx_data_prod->data = replace_buf->data;
-	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-	sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
-	rxq->sw_rx_prod++;
-
-	/* move partial skb from cons to pool (don't unmap yet)
-	 * save mapping, in case we drop the packet later on.
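[Editor's note] The buffer shuffle at the start of qede_tpa_start() is worth restating: an aggregation pins its first buffer until TPA_END, so the driver never allocates in this path; it re-posts a buffer reserved at ring-setup time and parks the consumed one (still DMA-mapped) in the aggregation context. A condensed sketch under those assumptions, struct and helper names hypothetical:

#include <linux/types.h>

struct agg_buf {
	struct page *page;
	dma_addr_t dma;
};

struct agg_ctx {
	struct agg_buf spare;	/* pre-allocated at ring setup */
	struct agg_buf parked;	/* first buffer of the aggregation */
};

/* On TPA_START: keep the BD ring full without risking an allocation
 * failure in the middle of an aggregation.
 */
static void tpa_start_swap(struct agg_ctx *agg, struct agg_buf *cons_slot,
			   struct agg_buf *prod_slot)
{
	*prod_slot = agg->spare;	/* re-post the reserved buffer */
	agg->parked = *cons_slot;	/* don't unmap until TPA_END */
}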
- */ - tpa_info->buffer = *sw_rx_data_cons; - mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), - le32_to_cpu(rx_bd_cons->addr.lo)); - - tpa_info->buffer_mapping = mapping; - rxq->sw_rx_cons++; - - /* set tpa state to start only if we are able to allocate skb - * for this aggregation, otherwise mark as error and aggregation will - * be dropped - */ - tpa_info->skb = netdev_alloc_skb(edev->ndev, - le16_to_cpu(cqe->len_on_first_bd)); - if (unlikely(!tpa_info->skb)) { - DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - goto cons_buf; - } - - /* Start filling in the aggregation info */ - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); - tpa_info->frag_id = 0; - tpa_info->state = QEDE_AGG_STATE_START; - - /* Store some information from first CQE */ - tpa_info->start_cqe_placement_offset = cqe->placement_offset; - tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); - if ((le16_to_cpu(cqe->pars_flags.flags) >> - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & - PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) - tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); - else - tpa_info->vlan_tag = 0; - - qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); - - /* This is needed in order to enable forwarding support */ - qede_set_gro_params(edev, tpa_info->skb, cqe); - -cons_buf: /* We still need to handle bd_len_list to consume buffers */ - if (likely(cqe->ext_bd_len_list[0])) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->ext_bd_len_list[0])); - - if (unlikely(cqe->ext_bd_len_list[1])) { - DP_ERR(edev, - "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); - tpa_info->state = QEDE_AGG_STATE_ERROR; - } -} - -#ifdef CONFIG_INET -static void qede_gro_ip_csum(struct sk_buff *skb) -{ - const struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct iphdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), - iph->saddr, iph->daddr, 0); - - tcp_gro_complete(skb); -} - -static void qede_gro_ipv6_csum(struct sk_buff *skb) -{ - struct ipv6hdr *iph = ipv6_hdr(skb); - struct tcphdr *th; - - skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - th = tcp_hdr(skb); - - th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), - &iph->saddr, &iph->daddr, 0); - tcp_gro_complete(skb); -} -#endif - -static void qede_gro_receive(struct qede_dev *edev, - struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) -{ - /* FW can send a single MTU sized packet from gro flow - * due to aggregation timeout/last segment etc. which - * is not expected to be a gro packet. If a skb has zero - * frags then simply push it in the stack as non gso skb. 
- */ - if (unlikely(!skb->data_len)) { - skb_shinfo(skb)->gso_type = 0; - skb_shinfo(skb)->gso_size = 0; - goto send_skb; - } - -#ifdef CONFIG_INET - if (skb_shinfo(skb)->gso_size) { - skb_reset_network_header(skb); - - switch (skb->protocol) { - case htons(ETH_P_IP): - qede_gro_ip_csum(skb); - break; - case htons(ETH_P_IPV6): - qede_gro_ipv6_csum(skb); - break; - default: - DP_ERR(edev, - "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", - ntohs(skb->protocol)); - } - } -#endif - -send_skb: - skb_record_rx_queue(skb, fp->rxq->rxq_id); - qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); -} - -static inline void qede_tpa_cont(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct eth_fast_path_rx_tpa_cont_cqe *cqe) -{ - int i; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA cont with more than a single len_list entry\n"); -} - -static void qede_tpa_end(struct qede_dev *edev, - struct qede_fastpath *fp, - struct eth_fast_path_rx_tpa_end_cqe *cqe) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_agg_info *tpa_info; - struct sk_buff *skb; - int i; - - tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - skb = tpa_info->skb; - - for (i = 0; cqe->len_list[i]; i++) - qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->len_list[i])); - if (unlikely(i > 1)) - DP_ERR(edev, - "Strange - TPA emd with more than a single len_list entry\n"); - - if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) - goto err; - - /* Sanity */ - if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) - DP_ERR(edev, - "Strange - TPA had %02x BDs, but SKB has only %d frags\n", - cqe->num_of_bds, tpa_info->frag_id); - if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) - DP_ERR(edev, - "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", - le16_to_cpu(cqe->total_packet_len), skb->len); - - memcpy(skb->data, - page_address(tpa_info->buffer.data) + - tpa_info->start_cqe_placement_offset + - tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); - - /* Finalize the SKB */ - skb->protocol = eth_type_trans(skb, edev->ndev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ - NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); - - qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); - - tpa_info->state = QEDE_AGG_STATE_NONE; - - return; -err: - tpa_info->state = QEDE_AGG_STATE_NONE; - dev_kfree_skb_any(tpa_info->skb); - tpa_info->skb = NULL; -} - -static bool qede_tunn_exist(u16 flag) -{ - return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << - PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); -} - -static u8 qede_check_tunn_csum(u16 flag) -{ - u16 csum_flag = 0; - u8 tcsum = 0; - - if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; - - if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { - csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; - tcsum = QEDE_TUNN_CSUM_UNNECESSARY; - } - - csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << - PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | - PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << - 
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-	if (csum_flag & flag)
-		return QEDE_CSUM_ERROR;
-
-	return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-
-static u8 qede_check_notunn_csum(u16 flag)
-{
-	u16 csum_flag = 0;
-	u8 csum = 0;
-
-	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
-		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
-			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
-		csum = QEDE_CSUM_UNNECESSARY;
-	}
-
-	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
-		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-	if (csum_flag & flag)
-		return QEDE_CSUM_ERROR;
-
-	return csum;
-}
-
-static u8 qede_check_csum(u16 flag)
-{
-	if (!qede_tunn_exist(flag))
-		return qede_check_notunn_csum(flag);
-	else
-		return qede_check_tunn_csum(flag);
-}
-
-static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
-				      u16 flag)
-{
-	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
-
-	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
-			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
-	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
-		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
-		return true;
-
-	return false;
-}
-
-/* Return true iff packet is to be passed to stack */
-static bool qede_rx_xdp(struct qede_dev *edev,
-			struct qede_fastpath *fp,
-			struct qede_rx_queue *rxq,
-			struct bpf_prog *prog,
-			struct sw_rx_data *bd,
-			struct eth_fast_path_rx_reg_cqe *cqe)
-{
-	u16 len = le16_to_cpu(cqe->len_on_first_bd);
-	struct xdp_buff xdp;
-	enum xdp_action act;
-
-	xdp.data = page_address(bd->data) + cqe->placement_offset;
-	xdp.data_end = xdp.data + len;
-
-	/* Queues always have a full reset currently, so for the time
-	 * being until there's atomic program replace just mark read
-	 * side for map helpers.
-	 */
-	rcu_read_lock();
-	act = bpf_prog_run_xdp(prog, &xdp);
-	rcu_read_unlock();
-
-	if (act == XDP_PASS)
-		return true;
-
-	/* Count number of packets not to be passed to stack */
-	rxq->xdp_no_pass++;
-
-	switch (act) {
-	case XDP_TX:
-		/* We need the replacement buffer before transmit. */
-		if (qede_alloc_rx_buffer(rxq)) {
-			qede_recycle_rx_bd_ring(rxq, 1);
-			return false;
-		}
-
-		/* Now if there's a transmission problem, we'd still have to
-		 * throw current buffer, as replacement was already allocated.
-		 */
-		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
-			dma_unmap_page(rxq->dev, bd->mapping,
-				       PAGE_SIZE, DMA_BIDIRECTIONAL);
-			__free_page(bd->data);
-		}
-
-		/* Regardless, we've consumed an Rx BD */
-		qede_rx_bd_ring_consume(rxq);
-		return false;
-
-	default:
-		bpf_warn_invalid_xdp_action(act);
-	case XDP_ABORTED:
-	case XDP_DROP:
-		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
-	}
-
-	return false;
-}
-
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
-					    struct qede_rx_queue *rxq,
-					    struct sw_rx_data *bd, u16 len,
-					    u16 pad)
-{
-	unsigned int offset = bd->page_offset;
-	struct skb_frag_struct *frag;
-	struct page *page = bd->data;
-	unsigned int pull_len;
-	struct sk_buff *skb;
-	unsigned char *va;
-
-	/* Allocate a new SKB with a sufficiently large header len */
-	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
-	if (unlikely(!skb))
-		return NULL;
-
-	/* Copy data into SKB - if it's small, we can simply copy it and
-	 * re-use the already allocated & mapped memory.
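[Editor's note] qede_rx_xdp() above follows the canonical run-and-switch XDP pattern: build an xdp_buff around the raw buffer, run the program under rcu_read_lock(), then branch on the verdict. A minimal, driver-agnostic sketch; the recycle/transmit comments stand in for the qede-specific helpers:

#include <linux/filter.h>
#include <linux/rcupdate.h>

/* Returns true if the frame should continue to the regular Rx path. */
static bool xdp_rx_hook(struct bpf_prog *prog, void *data, unsigned int len)
{
	struct xdp_buff xdp = {
		.data = data,
		.data_end = data + len,
	};
	enum xdp_action act;

	rcu_read_lock();	/* protects map lookups in the program */
	act = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	switch (act) {
	case XDP_PASS:
		return true;
	case XDP_TX:
		/* post the frame on the XDP Tx ring; on failure the
		 * buffer must still be recycled, exactly like a drop
		 */
		return false;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
	case XDP_DROP:
		/* return the buffer to the Rx ring */
		return false;
	}
}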
- */ - if (len + pad <= edev->rx_copybreak) { - memcpy(skb_put(skb, len), - page_address(page) + pad + offset, len); - qede_reuse_page(rxq, bd); - goto out; - } - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, pad + offset, len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - /* Correct the skb & frag sizes offset after the pull */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { - /* Incr page ref count to reuse on allocation failure so - * that it doesn't get freed while freeing SKB [as its - * already mapped there]. - */ - page_ref_inc(page); - dev_kfree_skb_any(skb); - return NULL; - } - -out: - /* We've consumed the first BD and prepared an SKB */ - qede_rx_bd_ring_consume(rxq); - return skb; -} - -static int qede_rx_build_jumbo(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sk_buff *skb, - struct eth_fast_path_rx_reg_cqe *cqe, - u16 first_bd_len) -{ - u16 pkt_len = le16_to_cpu(cqe->pkt_len); - struct sw_rx_data *bd; - u16 bd_cons_idx; - u8 num_frags; - - pkt_len -= first_bd_len; - - /* We've already used one BD for the SKB. Now take care of the rest */ - for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { - u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : - pkt_len; - - if (unlikely(!cur_size)) { - DP_ERR(edev, - "Still got %d BDs for mapping jumbo, but length became 0\n", - num_frags); - goto out; - } - - /* We need a replacement buffer for each BD */ - if (unlikely(qede_alloc_rx_buffer(rxq))) - goto out; - - /* Now that we've allocated the replacement buffer, - * we can safely consume the next BD and map it to the SKB. 
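[Editor's note] qede_rx_allocate_skb() implements the usual copybreak trade-off: frames at or below the threshold are memcpy'd into a fresh skb so the mapped page can be re-posted immediately, while larger frames attach the page as a fragment and only pull the protocol headers into the linear area. A condensed sketch, with a hypothetical RX_HDR_SIZE and the threshold passed in:

#include <linux/netdevice.h>
#include <linux/mm.h>

#define RX_HDR_SIZE 256		/* hypothetical linear-area size */

static struct sk_buff *build_rx_skb(struct net_device *dev,
				    struct page *page, unsigned int off,
				    unsigned int len, unsigned int copybreak,
				    unsigned int truesize)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, RX_HDR_SIZE);

	if (unlikely(!skb))
		return NULL;

	if (len <= copybreak) {
		/* copy out; the caller re-posts the page to the ring */
		memcpy(skb_put(skb, len), page_address(page) + off, len);
	} else {
		/* zero-copy; the real driver additionally pulls the
		 * headers (eth_get_headlen()) into the linear area
		 */
		skb_add_rx_frag(skb, 0, page, off, len, truesize);
	}

	return skb;
}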
- */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - qede_rx_bd_ring_consume(rxq); - - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, 0, cur_size); - - skb->truesize += PAGE_SIZE; - skb->data_len += cur_size; - skb->len += cur_size; - pkt_len -= cur_size; - } - - if (unlikely(pkt_len)) - DP_ERR(edev, - "Mapped all BDs of jumbo, but still have %d bytes\n", - pkt_len); - -out: - return num_frags; -} - -static int qede_rx_process_tpa_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq, - union eth_rx_cqe *cqe, - enum eth_rx_cqe_type type) -{ - switch (type) { - case ETH_RX_CQE_TYPE_TPA_START: - qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); - return 0; - case ETH_RX_CQE_TYPE_TPA_CONT: - qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); - return 0; - case ETH_RX_CQE_TYPE_TPA_END: - qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); - return 1; - default: - return 0; - } -} - -static int qede_rx_process_cqe(struct qede_dev *edev, - struct qede_fastpath *fp, - struct qede_rx_queue *rxq) -{ - struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); - struct eth_fast_path_rx_reg_cqe *fp_cqe; - u16 len, pad, bd_cons_idx, parse_flag; - enum eth_rx_cqe_type cqe_type; - union eth_rx_cqe *cqe; - struct sw_rx_data *bd; - struct sk_buff *skb; - __le16 flags; - u8 csum_flag; - - /* Get the CQE from the completion ring */ - cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); - cqe_type = cqe->fast_path_regular.type; - - /* Process an unlikely slowpath event */ - if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { - struct eth_slow_path_rx_cqe *sp_cqe; - - sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; - edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); - return 0; - } - - /* Handle TPA cqes */ - if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) - return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); - - /* Get the data from the SW ring; Consume it only after it's evident - * we wouldn't recycle it. - */ - bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; - bd = &rxq->sw_rx_ring[bd_cons_idx]; - - fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->len_on_first_bd); - pad = fp_cqe->placement_offset; - - /* Run eBPF program if one is attached */ - if (xdp_prog) - if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) - return 1; - - /* If this is an error packet then drop it */ - flags = cqe->fast_path_regular.pars_flags.flags; - parse_flag = le16_to_cpu(flags); - - csum_flag = qede_check_csum(parse_flag); - if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { - rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); - rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - } - - /* Basic validation passed; Need to prepare an SKB. This would also - * guarantee to finally consume the first BD upon success. - */ - skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); - if (!skb) { - rxq->rx_alloc_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } - - /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed - * by a single cqe. 
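[Editor's note] A rule recurs across qede_realloc_rx_buffer(), the XDP_TX branch and the jumbo loop above: an Rx buffer is only taken off the ring once its replacement has been posted; on allocation failure the buffer is recycled in place and the packet dropped, so the BD ring never drains. In sketch form, all names hypothetical:

#include <linux/types.h>

struct rx_ring;
struct rx_buf;

static int post_replacement_buffer(struct rx_ring *rxq);	/* alloc+map+post */
static void recycle_in_place(struct rx_ring *rxq, struct rx_buf *bd);
static void consume_bd(struct rx_ring *rxq);

/* Invariant: ring occupancy is preserved on every path. */
static bool claim_rx_buffer(struct rx_ring *rxq, struct rx_buf *bd)
{
	if (post_replacement_buffer(rxq)) {
		recycle_in_place(rxq, bd);	/* drop, ring stays full */
		return false;
	}

	consume_bd(rxq);			/* safe to keep the buffer */
	return true;
}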
- */ - if (fp_cqe->bd_num > 1) { - u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, - fp_cqe, len); - - if (unlikely(unmapped_frags > 0)) { - qede_recycle_rx_bd_ring(rxq, unmapped_frags); - dev_kfree_skb_any(skb); - return 0; - } - } - - /* The SKB contains all the data. Now prepare meta-magic */ - skb->protocol = eth_type_trans(skb, edev->ndev); - qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); - qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, rxq->rxq_id); - - /* SKB is prepared - pass it to stack */ - qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); - - return 1; -} - -static int qede_rx_int(struct qede_fastpath *fp, int budget) -{ - struct qede_rx_queue *rxq = fp->rxq; - struct qede_dev *edev = fp->edev; - u16 hw_comp_cons, sw_comp_cons; - int work_done = 0; - - hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - - /* Memory barrier to prevent the CPU from doing speculative reads of CQE - * / BD in the while-loop before reading hw_comp_cons. If the CQE is - * read before it is written by FW, then FW writes CQE and SB, and then - * the CPU reads the hw_comp_cons, it will use an old CQE. - */ - rmb(); - - /* Loop to complete all indicated BDs */ - while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { - qede_rx_process_cqe(edev, fp, rxq); - qed_chain_recycle_consumed(&rxq->rx_comp_ring); - sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); - work_done++; - } - - /* Update producers */ - qede_update_rx_prod(edev, rxq); - - return work_done; -} - -static bool qede_poll_is_more_work(struct qede_fastpath *fp) -{ - qed_sb_update_sb_idx(fp->sb_info); - - /* *_has_*_work() reads the status block, thus we need to ensure that - * status block indices have been actually read (qed_sb_update_sb_idx) - * prior to this check (*_has_*_work) so that we won't write the - * "newer" value of the status block to HW (if there was a DMA right - * after qede_has_rx_work and if there is no rmb, the memory reading - * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). - * In this case there will never be another interrupt until there is - * another update of the status block, while there is still unhandled - * work. - */ - rmb(); - - if (likely(fp->type & QEDE_FASTPATH_RX)) - if (qede_has_rx_work(fp->rxq)) - return true; - - if (fp->type & QEDE_FASTPATH_XDP) - if (qede_txq_has_work(fp->xdp_tx)) - return true; - - if (likely(fp->type & QEDE_FASTPATH_TX)) - if (qede_txq_has_work(fp->txq)) - return true; - - return false; -} - -static int qede_poll(struct napi_struct *napi, int budget) -{ - struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, - napi); - struct qede_dev *edev = fp->edev; - int rx_work_done = 0; - - if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq)) - qede_tx_int(edev, fp->txq); - - if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) - qede_xdp_tx_int(edev, fp->xdp_tx); - - rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && - qede_has_rx_work(fp->rxq)) ? 
- qede_rx_int(fp, budget) : 0; - if (rx_work_done < budget) { - if (!qede_poll_is_more_work(fp)) { - napi_complete(napi); - - /* Update and reenable interrupts */ - qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); - } else { - rx_work_done = budget; - } - } - - if (fp->xdp_xmit) { - u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); - - fp->xdp_xmit = 0; - fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); - qede_update_tx_producer(fp->xdp_tx); - } - - return rx_work_done; -} - -static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) -{ - struct qede_fastpath *fp = fp_cookie; - - qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); - - napi_schedule_irqoff(&fp->napi); - return IRQ_HANDLED; -} - -/* ------------------------------------------------------------------------- - * END OF FAST-PATH - * ------------------------------------------------------------------------- - */ - static int qede_open(struct net_device *ndev); static int qede_close(struct net_device *ndev); -static int qede_set_mac_addr(struct net_device *ndev, void *p); -static void qede_set_rx_mode(struct net_device *ndev); -static void qede_config_rx_mode(struct net_device *ndev); - -static int qede_set_ucast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} - -static int qede_set_ucast_rx_vlan(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - u16 vid) -{ - struct qed_filter_params filter_cmd; - - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; - - return edev->ops->filter_config(edev->cdev, &filter_cmd); -} void qede_fill_by_demand_stats(struct qede_dev *edev) { @@ -2019,9 +398,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static -struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -2051,8 +429,6 @@ struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, stats->collisions = edev->stats.tx_total_collisions; stats->rx_crc_errors = edev->stats.rx_crc_errors; stats->rx_frame_errors = edev->stats.rx_align_errors; - - return stats; } #ifdef CONFIG_QED_SRIOV @@ -2096,444 +472,17 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx, return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); } -#endif - -static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) -{ - struct qed_update_vport_params params; - int rc; - - /* Proceed only if action actually needs to be performed */ - if (edev->accept_any_vlan == action) - return; - - memset(¶ms, 0, sizeof(params)); - - params.vport_id = 0; - params.accept_any_vlan = action; - params.update_accept_any_vlan_flg = 1; - - rc = edev->ops->vport_update(edev->cdev, ¶ms); - if (rc) { - DP_ERR(edev, "Failed to %s accept-any-vlan\n", - action ? 
"enable" : "disable"); - } else { - DP_INFO(edev, "%s accept-any-vlan\n", - action ? "enabled" : "disabled"); - edev->accept_any_vlan = action; - } -} - -static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan, *tmp; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); - - vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) { - DP_INFO(edev, "Failed to allocate struct for vlan\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&vlan->list); - vlan->vid = vid; - vlan->configured = false; - - /* Verify vlan isn't already configured */ - list_for_each_entry(tmp, &edev->vlan_list, list) { - if (tmp->vid == vlan->vid) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "vlan already configured\n"); - kfree(vlan); - return -EEXIST; - } - } - - /* If interface is down, cache this VLAN ID and return */ - __qede_lock(edev); - if (edev->state != QEDE_STATE_OPEN) { - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, VLAN %d will be configured when interface is up\n", - vid); - if (vid != 0) - edev->non_configured_vlans++; - list_add(&vlan->list, &edev->vlan_list); - goto out; - } - - /* Check for the filter limit. - * Note - vlan0 has a reserved filter and can be added without - * worrying about quota - */ - if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || - (vlan->vid == 0)) { - rc = qede_set_ucast_rx_vlan(edev, - QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %d\n", - vlan->vid); - kfree(vlan); - goto out; - } - vlan->configured = true; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) - edev->configured_vlans++; - } else { - /* Out of quota; Activate accept-any-VLAN mode */ - if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, true); - - edev->non_configured_vlans++; - } - - list_add(&vlan->list, &edev->vlan_list); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_del_vlan_from_list(struct qede_dev *edev, - struct qede_vlan *vlan) -{ - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - if (vlan->configured) - edev->configured_vlans--; - else - edev->non_configured_vlans--; - } - - list_del(&vlan->list); - kfree(vlan); -} - -static int qede_configure_vlan_filters(struct qede_dev *edev) -{ - int rc = 0, real_rc = 0, accept_any_vlan = 0; - struct qed_dev_eth_info *dev_info; - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return 0; - - dev_info = &edev->dev_info; - - /* Configure non-configured vlans */ - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (vlan->configured) - continue; - - /* We have used all our credits, now enable accept_any_vlan */ - if ((vlan->vid != 0) && - (edev->configured_vlans == dev_info->num_vlan_filters)) { - accept_any_vlan = 1; - continue; - } - - DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); - - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, - vlan->vid); - if (rc) { - DP_ERR(edev, "Failed to configure VLAN %u\n", - vlan->vid); - real_rc = rc; - continue; - } - - vlan->configured = true; - /* vlan0 filter doesn't consume our VLAN filter's quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans--; - edev->configured_vlans++; - } - } - - /* enable accept_any_vlan mode if we have more VLANs than credits, - * or remove accept_any_vlan mode if we've actually removed - * a non-configured vlan, and all remaining vlans are 
truly configured. - */ - - if (accept_any_vlan) - qede_config_accept_any_vlan(edev, true); - else if (!edev->non_configured_vlans) - qede_config_accept_any_vlan(edev, false); - - return real_rc; -} -static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) { struct qede_dev *edev = netdev_priv(dev); - struct qede_vlan *vlan = NULL; - int rc = 0; - - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); - - /* Find whether entry exists */ - __qede_lock(edev); - list_for_each_entry(vlan, &edev->vlan_list, list) - if (vlan->vid == vid) - break; - - if (!vlan || (vlan->vid != vid)) { - DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Vlan isn't configured\n"); - goto out; - } - - if (edev->state != QEDE_STATE_OPEN) { - /* As interface is already down, we don't have a VPORT - * instance to remove vlan filter. So just update vlan list - */ - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "Interface is down, removing VLAN from list only\n"); - qede_del_vlan_from_list(edev, vlan); - goto out; - } - - /* Remove vlan */ - if (vlan->configured) { - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, - vid); - if (rc) { - DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - goto out; - } - } - - qede_del_vlan_from_list(edev, vlan); - - /* We have removed a VLAN - try to see if we can - * configure non-configured VLAN from the list. - */ - rc = qede_configure_vlan_filters(edev); - -out: - __qede_unlock(edev); - return rc; -} - -static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) -{ - struct qede_vlan *vlan = NULL; - - if (list_empty(&edev->vlan_list)) - return; - - list_for_each_entry(vlan, &edev->vlan_list, list) { - if (!vlan->configured) - continue; - - vlan->configured = false; - - /* vlan0 filter isn't consuming out of our quota */ - if (vlan->vid != 0) { - edev->non_configured_vlans++; - edev->configured_vlans--; - } - DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", vlan->vid); - } - - edev->accept_any_vlan = false; -} - -static void qede_set_features_reload(struct qede_dev *edev, - struct qede_reload_args *args) -{ - edev->ndev->features = args->u.features; -} - -int qede_set_features(struct net_device *dev, netdev_features_t features) -{ - struct qede_dev *edev = netdev_priv(dev); - netdev_features_t changes = features ^ dev->features; - bool need_reload = false; - - /* No action needed if hardware GRO is disabled during driver load */ - if (changes & NETIF_F_GRO) { - if (dev->features & NETIF_F_GRO) - need_reload = !edev->gro_disable; - else - need_reload = edev->gro_disable; - } - - if (need_reload) { - struct qede_reload_args args; - - args.u.features = features; - args.func = &qede_set_features_reload; - - /* Make sure that we definitely need to reload. - * In case of an eBPF attached program, there will be no FW - * aggregations, so no need to actually reload. 
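[Editor's note] The VLAN bookkeeping in qede_vlan_rx_add_vid()/qede_configure_vlan_filters() above is a credit scheme: vlan0 has a reserved filter, every other VID consumes one hardware filter, and once the credits run out with VIDs still unserved the device falls back to accept-any-VLAN. A hypothetical condensation of that decision:

#include <linux/types.h>

static bool needs_accept_any_vlan(unsigned int configured_vlans,
				  unsigned int non_configured_vlans,
				  unsigned int num_vlan_filters)
{
	/* out of hardware filters and at least one VID unserved */
	return non_configured_vlans > 0 &&
	       configured_vlans >= num_vlan_filters;
}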
- */ - __qede_lock(edev); - if (edev->xdp_prog) - args.func(edev, &args); - else - qede_reload(edev, &args, true); - __qede_unlock(edev); - - return 1; - } - - return 0; -} - -static void qede_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (edev->vxlan_dst_port) - return; - - edev->vxlan_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (edev->geneve_dst_port) - return; - - edev->geneve_dst_port = t_port; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; - default: - return; - } - - schedule_delayed_work(&edev->sp_task, 0); -} - -static void qede_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (t_port != edev->vxlan_dst_port) - return; - - edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", - t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (t_port != edev->geneve_dst_port) - return; - - edev->geneve_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - break; - default: - return; - } - - schedule_delayed_work(&edev->sp_task, 0); -} - -/* 8B udp header + 8B base tunnel header + 32B option length */ -#define QEDE_MAX_TUN_HDR_LEN 48 - -static netdev_features_t qede_features_check(struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features) -{ - if (skb->encapsulation) { - u8 l4_proto = 0; - - switch (vlan_get_protocol(skb)) { - case htons(ETH_P_IP): - l4_proto = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - l4_proto = ipv6_hdr(skb)->nexthdr; - break; - default: - return features; - } - - /* Disable offloads for geneve tunnels, as HW can't parse - * the geneve header which has option length greater than 32B. 
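[Editor's note] qede_features_check() is an instance of the ndo_features_check contract: per skb, a driver may only clear features it cannot honour for that frame, never add any. A simplified sketch of such a clamp for oversized encapsulation headers (the constant is hypothetical, mirroring QEDE_MAX_TUN_HDR_LEN, and the real driver additionally checks that the inner transport is UDP):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MAX_TUN_HDR_LEN 48	/* hypothetical hardware parsing limit */

static netdev_features_t clamp_tunnel_features(struct sk_buff *skb,
					       netdev_features_t features)
{
	if (skb->encapsulation &&
	    skb_inner_mac_header(skb) - skb_transport_header(skb) >
	    MAX_TUN_HDR_LEN)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}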
- */ - if ((l4_proto == IPPROTO_UDP) && - ((skb_inner_mac_header(skb) - - skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) - return features & ~(NETIF_F_CSUM_MASK | - NETIF_F_GSO_MASK); - } - - return features; -} - -static void qede_xdp_reload_func(struct qede_dev *edev, - struct qede_reload_args *args) -{ - struct bpf_prog *old; - - old = xchg(&edev->xdp_prog, args->u.new_prog); - if (old) - bpf_prog_put(old); -} - -static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) -{ - struct qede_reload_args args; - - if (prog && prog->xdp_adjust_head) { - DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - - /* If we're called, there was already a bpf reference increment */ - args.func = &qede_xdp_reload_func; - args.u.new_prog = prog; - qede_reload(edev, &args, false); - - return 0; -} - -static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) -{ - struct qede_dev *edev = netdev_priv(dev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return qede_xdp_set(edev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; - return 0; - default: + if (!edev->ops) return -EINVAL; - } + + return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); } +#endif static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, @@ -2546,6 +495,7 @@ static const struct net_device_ops qede_netdev_ops = { #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, + .ndo_set_vf_trust = qede_set_vf_trust, #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, @@ -2814,7 +764,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 192; + pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -3215,8 +1165,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) goto err; /* Allocate buffers for the Rx ring */ + rxq->filled_buffers = 0; for (i = 0; i < rxq->num_rx_buffers; i++) { - rc = qede_alloc_rx_buffer(rxq); + rc = qede_alloc_rx_buffer(rxq, false); if (rc) { DP_ERR(edev, "Rx buffers allocation failed at index %d\n", i); @@ -3564,19 +1515,24 @@ static int qede_stop_txq(struct qede_dev *edev, static int qede_stop_queues(struct qede_dev *edev) { - struct qed_update_vport_params vport_update_params; + struct qed_update_vport_params *vport_update_params; struct qed_dev *cdev = edev->cdev; struct qede_fastpath *fp; int rc, i; /* Disable the vport */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = 0; - vport_update_params.update_vport_active_flg = 1; - vport_update_params.vport_active_flg = 0; - vport_update_params.update_rss_flg = 0; + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + + vport_update_params->vport_id = 0; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 0; + vport_update_params->update_rss_flg = 0; + + rc = edev->ops->vport_update(cdev, vport_update_params); + vfree(vport_update_params); - rc = edev->ops->vport_update(cdev, &vport_update_params); if (rc) { DP_ERR(edev, "Failed to update vport\n"); return rc; @@ -3688,11 +1644,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int vlan_removal_en = 1; struct qed_dev *cdev = 
edev->cdev; - struct qed_update_vport_params vport_update_params; - struct qed_queue_start_common_params q_params; struct qed_dev_info *qed_info = &edev->dev_info.common; + struct qed_update_vport_params *vport_update_params; + struct qed_queue_start_common_params q_params; struct qed_start_vport_params start = {0}; - bool reset_rss_indir = false; int rc, i; if (!edev->num_queues) { @@ -3701,6 +1656,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) return -EINVAL; } + vport_update_params = vzalloc(sizeof(*vport_update_params)); + if (!vport_update_params) + return -ENOMEM; + start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; @@ -3712,7 +1671,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", rc); - return rc; + goto out; } DP_VERBOSE(edev, NETIF_MSG_IFUP, @@ -3748,7 +1707,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) { DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; + goto out; } /* Use the return parameters */ @@ -3764,108 +1723,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); if (rc) - return rc; + goto out; fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); if (IS_ERR(fp->rxq->xdp_prog)) { rc = PTR_ERR(fp->rxq->xdp_prog); fp->rxq->xdp_prog = NULL; - return rc; + goto out; } } if (fp->type & QEDE_FASTPATH_TX) { rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); if (rc) - return rc; + goto out; } } /* Prepare and send the vport enable */ - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = start.vport_id; - vport_update_params.update_vport_active_flg = 1; - vport_update_params.vport_active_flg = 1; + vport_update_params->vport_id = start.vport_id; + vport_update_params->update_vport_active_flg = 1; + vport_update_params->vport_active_flg = 1; if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { - vport_update_params.update_tx_switching_flg = 1; - vport_update_params.tx_switching_flg = 1; + vport_update_params->update_tx_switching_flg = 1; + vport_update_params->tx_switching_flg = 1; } - /* Fill struct with RSS params */ - if (QEDE_RSS_COUNT(edev) > 1) { - vport_update_params.update_rss_flg = 1; - - /* Need to validate current RSS config uses valid entries */ - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - if (edev->rss_params.rss_ind_table[i] >= - QEDE_RSS_COUNT(edev)) { - reset_rss_indir = true; - break; - } - } - - if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || - reset_rss_indir) { - u16 val; - - for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { - u16 indir_val; + qede_fill_rss_params(edev, &vport_update_params->rss_params, + &vport_update_params->update_rss_flg); - val = QEDE_RSS_COUNT(edev); - indir_val = ethtool_rxfh_indir_default(i, val); - edev->rss_params.rss_ind_table[i] = indir_val; - } - edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { - netdev_rss_key_fill(edev->rss_params.rss_key, - sizeof(edev->rss_params.rss_key)); - edev->rss_params_inited |= QEDE_RSS_KEY_INITED; - } - - if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { - edev->rss_params.rss_caps = QED_RSS_IPV4 | - QED_RSS_IPV6 | - QED_RSS_IPV4_TCP | - QED_RSS_IPV6_TCP; - edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; - } - - 
memcpy(&vport_update_params.rss_params, &edev->rss_params, - sizeof(vport_update_params.rss_params)); - } else { - memset(&vport_update_params.rss_params, 0, - sizeof(vport_update_params.rss_params)); - } - - rc = edev->ops->vport_update(cdev, &vport_update_params); - if (rc) { + rc = edev->ops->vport_update(cdev, vport_update_params); + if (rc) DP_ERR(edev, "Update V-PORT failed %d\n", rc); - return rc; - } - - return 0; -} - -static int qede_set_mcast_rx_mac(struct qede_dev *edev, - enum qed_filter_xcast_params_type opcode, - unsigned char *mac, int num_macs) -{ - struct qed_filter_params filter_cmd; - int i; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; - - for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); - - return edev->ops->filter_config(edev->cdev, &filter_cmd); +out: + vfree(vport_update_params); + return rc; } enum qede_unload_mode { @@ -4097,192 +1992,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } } - -static int qede_set_mac_addr(struct net_device *ndev, void *p) -{ - struct qede_dev *edev = netdev_priv(ndev); - struct sockaddr *addr = p; - int rc; - - ASSERT_RTNL(); /* @@@TBD To be removed */ - - DP_INFO(edev, "Set_mac_addr called\n"); - - if (!is_valid_ether_addr(addr->sa_data)) { - DP_NOTICE(edev, "The MAC address is not valid\n"); - return -EFAULT; - } - - if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { - DP_NOTICE(edev, "qed prevents setting MAC\n"); - return -EINVAL; - } - - ether_addr_copy(ndev->dev_addr, addr->sa_data); - - if (!netif_running(ndev)) { - DP_NOTICE(edev, "The device is currently down\n"); - return 0; - } - - /* Remove the previous primary mac */ - rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - edev->primary_mac); - if (rc) - return rc; - - edev->ops->common->update_mac(edev->cdev, addr->sa_data); - - /* Add MAC filter according to the new unicast HW MAC address */ - ether_addr_copy(edev->primary_mac, ndev->dev_addr); - return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - edev->primary_mac); -} - -static int -qede_configure_mcast_filtering(struct net_device *ndev, - enum qed_filter_rx_mode_type *accept_flags) -{ - struct qede_dev *edev = netdev_priv(ndev); - unsigned char *mc_macs, *temp; - struct netdev_hw_addr *ha; - int rc = 0, mc_count; - size_t size; - - size = 64 * ETH_ALEN; - - mc_macs = kzalloc(size, GFP_KERNEL); - if (!mc_macs) { - DP_NOTICE(edev, - "Failed to allocate memory for multicast MACs\n"); - rc = -ENOMEM; - goto exit; - } - - temp = mc_macs; - - /* Remove all previously configured MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, - mc_macs, 1); - if (rc) - goto exit; - - netif_addr_lock_bh(ndev); - - mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { - netdev_for_each_mc_addr(ha, ndev) { - ether_addr_copy(temp, ha->addr); - temp += ETH_ALEN; - } - } - - netif_addr_unlock_bh(ndev); - - /* Check for all multicast @@@TBD resource allocation */ - if ((ndev->flags & IFF_ALLMULTI) || - (mc_count > 64)) { - if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) - *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; - } else { - /* Add all multicast MAC filters */ - rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, - mc_macs, mc_count); - } - -exit: - kfree(mc_macs); - return rc; -} - -static void qede_set_rx_mode(struct net_device *ndev) -{ - struct qede_dev 
*edev = netdev_priv(ndev);
-
-	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
-	schedule_delayed_work(&edev->sp_task, 0);
-}
-
-/* Must be called with qede_lock held */
-static void qede_config_rx_mode(struct net_device *ndev)
-{
-	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
-	struct qede_dev *edev = netdev_priv(ndev);
-	struct qed_filter_params rx_mode;
-	unsigned char *uc_macs, *temp;
-	struct netdev_hw_addr *ha;
-	int rc, uc_count;
-	size_t size;
-
-	netif_addr_lock_bh(ndev);
-
-	uc_count = netdev_uc_count(ndev);
-	size = uc_count * ETH_ALEN;
-
-	uc_macs = kzalloc(size, GFP_ATOMIC);
-	if (!uc_macs) {
-		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
-		netif_addr_unlock_bh(ndev);
-		return;
-	}
-
-	temp = uc_macs;
-	netdev_for_each_uc_addr(ha, ndev) {
-		ether_addr_copy(temp, ha->addr);
-		temp += ETH_ALEN;
-	}
-
-	netif_addr_unlock_bh(ndev);
-
-	/* Configure the struct for the Rx mode */
-	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
-	/* Remove all previous unicast secondary macs and multicast macs
-	 * (configure / leave the primary mac)
-	 */
-	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
-				   edev->primary_mac);
-	if (rc)
-		goto out;
-
-	/* Check for promiscuous */
-	if ((ndev->flags & IFF_PROMISC) ||
-	    (uc_count > edev->dev_info.num_mac_filters - 1)) {
-		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
-	} else {
-		/* Add MAC filters according to the unicast secondary macs */
-		int i;
-
-		temp = uc_macs;
-		for (i = 0; i < uc_count; i++) {
-			rc = qede_set_ucast_rx_mac(edev,
-						   QED_FILTER_XCAST_TYPE_ADD,
-						   temp);
-			if (rc)
-				goto out;
-
-			temp += ETH_ALEN;
-		}
-
-		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
-		if (rc)
-			goto out;
-	}
-
-	/* take care of VLAN mode */
-	if (ndev->flags & IFF_PROMISC) {
-		qede_config_accept_any_vlan(edev, true);
-	} else if (!edev->non_configured_vlans) {
-		/* It's possible that accept_any_vlan mode is set due to a
-		 * previous setting of IFF_PROMISC. If vlan credits are
-		 * sufficient, disable accept_any_vlan.
-		 */
-		qede_config_accept_any_vlan(edev, false);
-	}
-
-	rx_mode.filter.accept_flags = accept_flags;
-	edev->ops->filter_config(edev->cdev, &rx_mode);
-out:
-	kfree(uc_macs);
-}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index 49272716a7c4..f00657ce7c8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -1,5 +1,5 @@
 /* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2015-2017 QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.
You may choose to be licensed under the terms of the GNU diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 5c100ab86c00..ea38236f1ced 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2025,7 +2025,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, qdev->ndev); - netif_receive_skb(skb); + napi_gro_receive(&qdev->napi, skb); lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) @@ -2095,7 +2095,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, } skb2->protocol = eth_type_trans(skb2, qdev->ndev); - netif_receive_skb(skb2); + napi_gro_receive(&qdev->napi, skb2); ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; lrg_buf_cb2->skb = NULL; @@ -2105,8 +2105,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } -static int ql_tx_rx_clean(struct ql3_adapter *qdev, - int *tx_cleaned, int *rx_cleaned, int work_to_do) +static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) { struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; @@ -2114,7 +2113,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, /* While there are entries in the completion queue. */ while ((le32_to_cpu(*(qdev->prsp_producer_index)) != - qdev->rsp_consumer_index) && (work_done < work_to_do)) { + qdev->rsp_consumer_index) && (work_done < budget)) { net_rsp = qdev->rsp_current; rmb(); @@ -2130,21 +2129,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, case OPCODE_OB_MAC_IOCB_FN2: ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) net_rsp); - (*tx_cleaned)++; break; case OPCODE_IB_MAC_IOCB: case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; case OPCODE_IB_IP_IOCB: case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); - (*rx_cleaned)++; + work_done++; break; default: { u32 *tmp = (u32 *)net_rsp; @@ -2169,7 +2167,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, qdev->rsp_current++; } - work_done = *tx_cleaned + *rx_cleaned; } return work_done; @@ -2178,25 +2175,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, static int ql_poll(struct napi_struct *napi, int budget) { struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); - int rx_cleaned = 0, tx_cleaned = 0; - unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + int work_done; - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + work_done = ql_tx_rx_clean(qdev, budget); - if (tx_cleaned + rx_cleaned != budget) { - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __napi_complete(napi); + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&qdev->hw_lock, flags); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, &port_regs->CommonRegs.rspQConsumerIndex); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, flags); ql_enable_interrupts(qdev); } - return tx_cleaned + rx_cleaned; + return work_done; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index fedd7366713c..84dd83031a1b 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); qlcnic_enable_tx_intr(adapter, tx_ring); @@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) work_done = qlcnic_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) work_done = budget; if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) adapter = sds_ring->adapter; work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); if (work_done < budget) { - napi_complete(&sds_ring->napi); + napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 1409412ab39d..e9e647072596 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile index 7a6687982dae..fc57cedf4c0c 100644 --- a/drivers/net/ethernet/qualcomm/emac/Makefile +++ b/drivers/net/ethernet/qualcomm/emac/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o -qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \ +qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \ emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \ emac-sgmii-qdf2400.o diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c new file mode 100644 index 000000000000..cfc57d2c64f9 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -0,0 +1,185 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
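[Editor's note] The qla3xxx, qlcnic and qlge hunks above all make the same substitution: napi_complete(napi) becomes napi_complete_done(napi, work_done), which feeds the work count to the interrupt-moderation and busy-polling machinery and, in this tree, returns false when NAPI must stay scheduled, guarding the interrupt re-enable. The resulting canonical poll loop, with hypothetical my_clean_rx()/my_enable_irq() helpers:

#include <linux/netdevice.h>

struct my_ring {
	struct napi_struct napi;
	/* device-specific ring state */
};

static int my_clean_rx(struct my_ring *ring, int budget);	/* hypothetical */
static void my_enable_irq(struct my_ring *ring);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work_done = my_clean_rx(ring, budget);

	/* Re-enable interrupts only if NAPI really completed; if
	 * napi_complete_done() returns false (e.g. busy polling),
	 * the poll will be invoked again with interrupts still off.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_irq(ring);

	return work_done;
}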
+ */ + +#include <linux/ethtool.h> +#include <linux/phy.h> + +#include "emac.h" + +static const char * const emac_ethtool_stat_strings[] = { + "rx_ok", + "rx_bcast", + "rx_mcast", + "rx_pause", + "rx_ctrl", + "rx_fcs_err", + "rx_len_err", + "rx_byte_cnt", + "rx_runt", + "rx_frag", + "rx_sz_64", + "rx_sz_65_127", + "rx_sz_128_255", + "rx_sz_256_511", + "rx_sz_512_1023", + "rx_sz_1024_1518", + "rx_sz_1519_max", + "rx_sz_ov", + "rx_rxf_ov", + "rx_align_err", + "rx_bcast_byte_cnt", + "rx_mcast_byte_cnt", + "rx_err_addr", + "rx_crc_align", + "rx_jabbers", + "tx_ok", + "tx_bcast", + "tx_mcast", + "tx_pause", + "tx_exc_defer", + "tx_ctrl", + "tx_defer", + "tx_byte_cnt", + "tx_sz_64", + "tx_sz_65_127", + "tx_sz_128_255", + "tx_sz_256_511", + "tx_sz_512_1023", + "tx_sz_1024_1518", + "tx_sz_1519_max", + "tx_1_col", + "tx_2_col", + "tx_late_col", + "tx_abort_col", + "tx_underrun", + "tx_rd_eop", + "tx_len_err", + "tx_trunc", + "tx_bcast_byte", + "tx_mcast_byte", + "tx_col", +}; + +#define EMAC_STATS_LEN ARRAY_SIZE(emac_ethtool_stat_strings) + +static u32 emac_get_msglevel(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->msg_enable; +} + +static void emac_set_msglevel(struct net_device *netdev, u32 data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->msg_enable = data; +} + +static int emac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return EMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < EMAC_STATS_LEN; i++) { + strlcpy(data, emac_ethtool_stat_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static void emac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + spin_lock(&adpt->stats.lock); + + emac_update_hw_stats(adpt); + memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64)); + + spin_unlock(&adpt->stats.lock); +} + +static int emac_nway_reset(struct net_device *netdev) +{ + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +static void emac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + ring->rx_max_pending = EMAC_MAX_RX_DESCS; + ring->tx_max_pending = EMAC_MAX_TX_DESCS; + ring->rx_pending = adpt->rx_desc_cnt; + ring->tx_pending = adpt->tx_desc_cnt; +} + +static void emac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct phy_device *phydev = netdev->phydev; + + if (phydev) { + if (phydev->autoneg) + pause->autoneg = 1; + if (phydev->pause) + pause->rx_pause = 1; + if (phydev->pause != phydev->asym_pause) + pause->tx_pause = 1; + } +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + + .get_msglevel = emac_get_msglevel, + .set_msglevel = emac_set_msglevel, + + .get_sset_count = emac_get_sset_count, + .get_strings = emac_get_strings, + .get_ethtool_stats = emac_get_ethtool_stats, + + .get_ringparam = emac_get_ringparam, + .get_pauseparam = emac_get_pauseparam, + + .nway_reset = emac_nway_reset, + + .get_link = ethtool_op_get_link, +}; + +void emac_set_ethtool_ops(struct 
net_device *netdev) +{ + netdev->ethtool_ops = &emac_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0b4deb31e742..b991219862b1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -103,14 +103,6 @@ #define RXEN 0x00000002 #define TXEN 0x00000001 - -/* EMAC_WOL_CTRL0 */ -#define LK_CHG_PME 0x20 -#define LK_CHG_EN 0x10 -#define MG_FRAME_PME 0x8 -#define MG_FRAME_EN 0x4 -#define WK_FRAME_EN 0x1 - /* EMAC_DESC_CTRL_3 */ #define RFD_RING_SIZE_BMSK 0xfff @@ -314,8 +306,6 @@ struct emac_skb_cb { RX_PKT_INT2 |\ RX_PKT_INT3) -#define EMAC_MAC_IRQ_RES "core0" - void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr) { u32 crc32, bit, reg, mta; @@ -558,7 +548,7 @@ void emac_mac_reset(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); } -void emac_mac_start(struct emac_adapter *adpt) +static void emac_mac_start(struct emac_adapter *adpt) { struct phy_device *phydev = adpt->phydev; u32 mac, csr1; @@ -621,8 +611,6 @@ void emac_mac_start(struct emac_adapter *adpt) emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL, (HEADER_ENABLE | HEADER_CNT_EN), 0); - - emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); } void emac_mac_stop(struct emac_adapter *adpt) @@ -963,12 +951,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; - if (phydev->link) + if (phydev->link) { emac_mac_start(adpt); - else + sgmii->link_up(adpt); + } else { + sgmii->link_down(adpt); emac_mac_stop(adpt); + } phy_print_status(phydev); } @@ -977,40 +969,26 @@ static void emac_adjust_link(struct net_device *netdev) int emac_mac_up(struct emac_adapter *adpt) { struct net_device *netdev = adpt->netdev; - struct emac_irq *irq = &adpt->irq; int ret; emac_mac_rx_tx_ring_reset_all(adpt); emac_mac_config(adpt); - - ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq); - if (ret) { - netdev_err(adpt->netdev, "could not request %s irq\n", - EMAC_MAC_IRQ_RES); - return ret; - } - emac_mac_rx_descs_refill(adpt, &adpt->rx_q); + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, PHY_INTERFACE_MODE_SGMII); if (ret) { netdev_err(adpt->netdev, "could not connect phy\n"); - free_irq(irq->irq, irq); return ret; } + phy_attached_print(adpt->phydev, NULL); + /* enable mac irq */ writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); - /* Enable pause frames. Without this feature, the EMAC has been shown - * to receive (and drop) frames with FCS errors at gigabit connections. 
- */ - adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); napi_enable(&adpt->rx_q.napi); @@ -1036,7 +1014,6 @@ void emac_mac_down(struct emac_adapter *adpt) writel(DIS_INT, adpt->base + EMAC_INT_STATUS); writel(0, adpt->base + EMAC_INT_MASK); synchronize_irq(adpt->irq.irq); - free_irq(adpt->irq.irq, &adpt->irq); phy_disconnect(adpt->phydev); @@ -1213,7 +1190,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd), (bool)RRD_CVTAG(&rrd)); - netdev->last_rx = jiffies; (*num_pkts)++; } while (*num_pkts < max_pkts); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index f3aa24dc4a29..5028fb4bec2b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -230,7 +230,6 @@ struct emac_adapter; int emac_mac_up(struct emac_adapter *adpt); void emac_mac_down(struct emac_adapter *adpt); void emac_mac_reset(struct emac_adapter *adpt); -void emac_mac_start(struct emac_adapter *adpt); void emac_mac_stop(struct emac_adapter *adpt); void emac_mac_mode_config(struct emac_adapter *adpt); void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 2851b4c56570..441c19366489 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -22,8 +22,6 @@ #include <linux/acpi.h> #include "emac.h" #include "emac-mac.h" -#include "emac-phy.h" -#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_MDIO_CTRL 0x001414 @@ -228,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) return -ENODEV; } - if (adpt->phydev->drv) - phy_attached_print(adpt->phydev, NULL); - return 0; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h index 49f3701a6dd7..c0c301c72129 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h @@ -13,19 +13,6 @@ #ifndef _EMAC_PHY_H_ #define _EMAC_PHY_H_ -typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); - -/** emac_phy - internal emac phy - * @base base address - * @digital per-lane digital block - * @initialize initialization function - */ -struct emac_phy { - void __iomem *base; - void __iomem *digital; - emac_sgmii_initialize initialize; -}; - struct emac_adapter; int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c index af690e1a6e7b..10de8d0d9a56 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c @@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = { int emac_sgmii_init_fsm9900(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; unsigned int i; emac_reg_write_all(phy->base, physical_coding_sublayer_programming, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c index 5b8419498ef1..f62c215be779 100644 --- 
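Here the former struct emac_phy becomes struct emac_sgmii (defined later in this patch in emac-sgmii.h), and each per-SoC init routine — fsm9900, qdf2432, qdf2400 — is stored alongside generic open/close/link_up/link_down hooks that all share the emac_sgmii_function signature. Callers then dispatch through the structure, as emac_adjust_link() does above. A sketch of that dispatch; only the helper name is invented:

/* Illustrative only: forward a link transition to the hooks that
 * emac_sgmii_config() installs in this patch.
 */
static int example_sgmii_link_change(struct emac_adapter *adpt, bool up)
{
	struct emac_sgmii *sgmii = &adpt->phy;

	return up ? sgmii->link_up(adpt) : sgmii->link_down(adpt);
}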
a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c @@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2400(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c index 6170200d7479..b9c0df7bdd15 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c @@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = { int emac_sgmii_init_qdf2432(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; void __iomem *phy_regs = phy->base; void __iomem *laned = phy->digital; unsigned int i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index bf722a9bb09d..040b28977ee7 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -25,7 +25,9 @@ #define EMAC_SGMII_PHY_SPEED_CFG1 0x0074 #define EMAC_SGMII_PHY_IRQ_CMD 0x00ac #define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0 +#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4 #define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8 +#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4 #define FORCE_AN_TX_CFG BIT(5) #define FORCE_AN_RX_CFG BIT(4) @@ -36,6 +38,8 @@ #define SPDMODE_100 BIT(0) #define SPDMODE_10 0 +#define CDR_ALIGN_DET BIT(6) + #define IRQ_GLOBAL_CLEAR BIT(0) #define DECODE_CODE_ERR BIT(7) @@ -44,52 +48,28 @@ #define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 #define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR) +#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR) #define SERDES_START_WAIT_TIMES 100 -static int emac_sgmii_link_init(struct emac_adapter *adpt) +/* Initialize the SGMII link between the internal and external PHYs. */ +static void emac_sgmii_link_init(struct emac_adapter *adpt) { - struct phy_device *phydev = adpt->phydev; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; + /* Always use autonegotiation. It works no matter how the external + * PHY is configured. 
+ */ val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - - if (phydev->autoneg == AUTONEG_ENABLE) { - val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); - val |= AN_ENABLE; - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } else { - u32 speed_cfg; - - switch (phydev->speed) { - case SPEED_10: - speed_cfg = SPDMODE_10; - break; - case SPEED_100: - speed_cfg = SPDMODE_100; - break; - case SPEED_1000: - speed_cfg = SPDMODE_1000; - break; - default: - return -EINVAL; - } - - if (phydev->duplex == DUPLEX_FULL) - speed_cfg |= DUPLEX_MODE; - - val &= ~AN_ENABLE; - writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1); - writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); - } - - return 0; + val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); + val |= AN_ENABLE; + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); } static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 status; writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); @@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) return 0; } +/* The number of decode errors that triggers a reset */ +#define DECODE_ERROR_LIMIT 2 + +static irqreturn_t emac_sgmii_interrupt(int irq, void *data) +{ + struct emac_adapter *adpt = data; + struct emac_sgmii *phy = &adpt->phy; + u32 status; + + status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS); + status &= SGMII_ISR_MASK; + if (!status) + return IRQ_HANDLED; + + /* If we get a decoding error and CDR is not locked, then try + * resetting the internal PHY. The internal PHY uses an embedded + * clock with Clock and Data Recovery (CDR) to recover the + * clock and data. + */ + if (status & SGMII_PHY_INTERRUPT_ERR) { + int count; + + /* The SGMII is capable of recovering from some decode + * errors automatically. However, if we get multiple + * decode errors in a row, then assume that something + * is wrong and reset the interface. + */ + count = atomic_inc_return(&phy->decode_error_count); + if (count == DECODE_ERROR_LIMIT) { + schedule_work(&adpt->work_thread); + atomic_set(&phy->decode_error_count, 0); + } + } else { + /* We only care about consecutive decode errors. 
*/ + atomic_set(&phy->decode_error_count, 0); + } + + if (emac_sgmii_irq_clear(adpt, status)) { + netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n"); + schedule_work(&adpt->work_thread); + } + + return IRQ_HANDLED; +} + static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) { - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; u32 val; /* Reset PHY */ @@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt) int ret; emac_sgmii_reset_prepare(adpt); - - ret = emac_sgmii_link_init(adpt); - if (ret) { - netdev_err(adpt->netdev, "unsupported link speed\n"); - return; - } + emac_sgmii_link_init(adpt); ret = adpt->phy.initialize(adpt); if (ret) @@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt) ret); } +static int emac_sgmii_open(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + if (sgmii->irq) { + /* Make sure interrupts are cleared and disabled first */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0, + "emac-sgmii", adpt); + if (ret) { + netdev_err(adpt->netdev, + "could not register handler for internal PHY\n"); + return ret; + } + } + + return 0; +} + +static int emac_sgmii_close(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Make sure interrupts are disabled */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + free_irq(sgmii->irq, adpt); + + return 0; +} + +/* The error interrupts are only valid after the link is up */ +static int emac_sgmii_link_up(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + int ret; + + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; + + writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + return 0; +} + +static int emac_sgmii_link_down(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = &adpt->phy; + + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + + return 0; +} + static int emac_sgmii_acpi_match(struct device *dev, void *data) { #ifdef CONFIG_ACPI @@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_initialize *initialize = data; + emac_sgmii_function *initialize = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = { int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; - struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *phy = &adpt->phy; struct resource *res; int ret; @@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_initialize)match->data; + phy->initialize = (emac_sgmii_function)match->data; } + phy->open = emac_sgmii_open; + phy->close = emac_sgmii_close; + phy->link_up = emac_sgmii_link_up; + phy->link_down = emac_sgmii_link_down; + /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (ret) goto error; - emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + 
emac_sgmii_link_init(adpt); + + ret = platform_get_irq(sgmii_pdev, 0); + if (ret > 0) + phy->irq = ret; /* We've remapped the addresses, so we don't need the device any * more. of_find_device_by_node() says we should release it. diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index 80ed3dc3157a..e7c0c3b2baa4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,6 +16,31 @@ struct emac_adapter; struct platform_device; +typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); + +/** emac_sgmii - internal emac phy + * @base base address + * @digital per-lane digital block + * @irq the interrupt number + * @decode_error_count reference count of consecutive decode errors + * @initialize initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_up called when the link comes up + * @link_down called when the link comes down + */ +struct emac_sgmii { + void __iomem *base; + void __iomem *digital; + unsigned int irq; + atomic_t decode_error_count; + emac_sgmii_function initialize; + emac_sgmii_function open; + emac_sgmii_function close; + emac_sgmii_function link_up; + emac_sgmii_function link_down; +}; + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); void emac_sgmii_reset(struct emac_adapter *adpt); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index f46d300bd585..3387c0a88746 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget) emac_mac_rx_process(adpt, rx_q, &work_done, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); irq->mask |= rx_q->intr; writel(irq->mask, adpt->base + EMAC_INT_MASK); @@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) static int emac_open(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_irq *irq = &adpt->irq; int ret; + ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq); + if (ret) { + netdev_err(adpt->netdev, "could not request emac-core0 irq\n"); + return ret; + } + /* allocate rx/tx dma buffer & descriptors */ ret = emac_mac_rx_tx_rings_alloc_all(adpt); if (ret) { netdev_err(adpt->netdev, "error allocating rx/tx rings\n"); + free_irq(irq->irq, irq); return ret; } ret = emac_mac_up(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); return ret; } - emac_mac_start(adpt); + ret = adpt->phy.open(adpt); + if (ret) { + emac_mac_down(adpt); + emac_mac_rx_tx_rings_free_all(adpt); + free_irq(irq->irq, irq); + return ret; + } return 0; } @@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev) mutex_lock(&adpt->reset_lock); + adpt->phy.close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); + free_irq(adpt->irq.irq, &adpt->irq); + mutex_unlock(&adpt->reset_lock); return 0; @@ -311,45 +329,56 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(netdev->phydev, ifr, cmd); } -/* Provide network statistics info for the interface */ -static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *net_stats) +/** + * emac_update_hw_stats - read the EMAC stat registers + * + * Reads the stats 
registers and write the values to adpt->stats. + * + * adpt->stats.lock must be held while calling this function, + * and while reading from adpt->stats. + */ +void emac_update_hw_stats(struct emac_adapter *adpt) { - struct emac_adapter *adpt = netdev_priv(netdev); - unsigned int addr = REG_MAC_RX_STATUS_BIN; struct emac_stats *stats = &adpt->stats; u64 *stats_itr = &adpt->stats.rx_ok; - u32 val; - - spin_lock(&stats->lock); + void __iomem *base = adpt->base; + unsigned int addr; + addr = REG_MAC_RX_STATUS_BIN; while (addr <= REG_MAC_RX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; + *stats_itr += readl_relaxed(base + addr); stats_itr++; addr += sizeof(u32); } /* additional rx status */ - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23); - adpt->stats.rx_crc_align += val; - val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24); - adpt->stats.rx_jabbers += val; + stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23); + stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24); /* update tx status */ addr = REG_MAC_TX_STATUS_BIN; - stats_itr = &adpt->stats.tx_ok; + stats_itr = &stats->tx_ok; while (addr <= REG_MAC_TX_STATUS_END) { - val = readl_relaxed(adpt->base + addr); - *stats_itr += val; - ++stats_itr; + *stats_itr += readl_relaxed(base + addr); + stats_itr++; addr += sizeof(u32); } /* additional tx status */ - val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25); - adpt->stats.tx_col += val; + stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25); +} + +/* Provide network statistics info for the interface */ +static void emac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_stats *stats = &adpt->stats; + + spin_lock(&stats->lock); + + emac_update_hw_stats(adpt); /* return parsed statistics */ net_stats->rx_packets = stats->rx_ok; @@ -377,8 +406,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, net_stats->tx_window_errors = stats->tx_late_col; spin_unlock(&stats->lock); - - return net_stats; } static const struct net_device_ops emac_netdev_ops = { @@ -593,7 +620,7 @@ static int emac_probe(struct platform_device *pdev) { struct net_device *netdev; struct emac_adapter *adpt; - struct emac_phy *phy; + struct emac_sgmii *phy; u16 devid, revid; u32 reg; int ret; @@ -620,12 +647,14 @@ static int emac_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); + emac_set_ethtool_ops(netdev); adpt = netdev_priv(netdev); adpt->netdev = netdev; adpt->msg_enable = EMAC_MSG_DEFAULT; phy = &adpt->phy; + atomic_set(&phy->decode_error_count, 0); mutex_init(&adpt->reset_lock); spin_lock_init(&adpt->stats.lock); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 0c76e6cb8c9e..ef91dcc7f646 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include "emac-mac.h" #include "emac-phy.h" +#include "emac-sgmii.h" /* EMAC base register offsets */ #define EMAC_DMA_MAS_CTRL 0x001400 @@ -166,10 +167,6 @@ enum emac_clk_id { #define EMAC_MAX_SETUP_LNK_CYCLE 100 -/* Wake On Lan */ -#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ -#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ - struct emac_stats { /* rx */ u64 rx_ok; /* good packets */ @@ -291,7 +288,7 @@ struct emac_adapter { void __iomem *base; 
void __iomem *csr; - struct emac_phy phy; + struct emac_sgmii phy; struct emac_stats stats; struct emac_irq irq; @@ -330,6 +327,8 @@ struct emac_adapter { int emac_reinit_locked(struct emac_adapter *adpt); void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); -irqreturn_t emac_isr(int irq, void *data); + +void emac_set_ethtool_ops(struct net_device *netdev); +void emac_update_hw_stats(struct emac_adapter *adpt); #endif /* _EMAC_H_ */ diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 0b3cd58093d5..672f6b696069 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) struct cp_private *cp = container_of(napi, struct cp_private, napi); struct net_device *dev = cp->dev; unsigned int rx_tail = cp->rx_tail; - int rx; + int rx = 0; - rx = 0; -rx_status_loop: cpw16(IntrStatus, cp_rx_intr_mask); while (rx < budget) { @@ -556,15 +554,10 @@ rx_next: /* if we did not reach work limit, then we're done with * this round of polling */ - if (rx < budget) { + if (rx < budget && napi_complete_done(napi, rx)) { unsigned long flags; - if (cpr16(IntrStatus) & cp_rx_intr_mask) - goto rx_status_loop; - - napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); - __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); spin_unlock_irqrestore(&cp->lock, flags); } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 9bc047ac883b..89631753e799 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 - *stats); +static void rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); @@ -2136,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - /* - * Order is important since data can get interrupted - * again when we think we are done. - */ + spin_lock_irqsave(&tp->lock, flags); - __napi_complete(napi); RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } @@ -2516,7 +2511,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } -static struct rtnl_link_stats64 * +static void rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8139_private *tp = netdev_priv(dev); @@ -2544,8 +2539,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); - - return stats; } /* Set or clear the multicast filter for this adaptor. 
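The rtl8139_get_stats64() conversion above — repeated below for r8169 and sxgbe — drops the struct rtnl_link_stats64 return value from the ndo_get_stats64 handler: the core now always supplies the buffer, so the handler only fills it in. A minimal sketch of the new signature, with placeholder names:

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Fill the caller-provided buffer; nothing is returned. */
	stats->rx_packets = priv->rx_packets;
	stats->rx_bytes = priv->rx_bytes;
	stats->tx_packets = priv->tx_packets;
	stats->tx_bytes = priv->tx_bytes;
}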
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 570ed3bd3cbf..9bcd4aefc9c5 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -170,7 +170,7 @@ struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ - long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. */ @@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } num_tx_since_rx++; } else if (num_tx_since_rx > 8 && - time_after(jiffies, dev->last_rx + HZ)) { + time_after(jiffies, lp->last_rx_time + HZ)) { if (net_debug > 2) printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and " "%ld jiffies status %02x CMR1 %02x.\n", dev->name, - num_tx_since_rx, jiffies - dev->last_rx, status, + num_tx_since_rx, jiffies - lp->last_rx_time, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); dev->stats.rx_missed_errors++; hardware_init(dev); @@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev) read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8f1623bf2134..81f18a833527 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); rtl_irq_enable(tp, enable_mask); mmiowb(); @@ -7755,7 +7755,7 @@ err_pm_runtime_put: goto out; } -static struct rtnl_link_stats64 * +static void rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); @@ -7809,8 +7809,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) le16_to_cpu(tp->tc_offset.tx_aborted); pm_runtime_put_noidle(&pdev->dev); - - return stats; } static void rtl8169_net_suspend(struct net_device *dev) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index f1109661a533..0525bd696d5d 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -76,6 +76,7 @@ enum ravb_reg { CDAR20 = 0x0060, CDAR21 = 0x0064, ESR = 0x0088, + APSR = 0x008C, /* R-Car Gen3 only */ RCR = 0x0090, RQC0 = 0x0094, RQC1 = 0x0098, @@ -248,6 +249,15 @@ enum ESR_BIT { ESR_EIL = 0x00001000, }; +/* APSR */ +enum APSR_BIT { + APSR_MEMS = 0x00000002, + APSR_CMSW = 0x00000010, + APSR_DM = 0x00006000, /* Undocumented? 
*/ + APSR_DM_RDM = 0x00002000, + APSR_DM_TDM = 0x00004000, +}; + /* RCR */ enum RCR_BIT { RCR_EFFS = 0x00000001, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 301f48755093..8cfc4a54f2dc 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sys_soc.h> #include <asm/div64.h> @@ -988,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev) phy_print_status(phydev); } +static const struct soc_device_attribute r8a7795es10[] = { + { .soc_id = "r8a7795", .revision = "ES1.0", }, + { /* sentinel */ } +}; + /* PHY init function */ static int ravb_phy_init(struct net_device *ndev) { @@ -1023,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev) goto err_deregister_fixed_link; } - /* This driver only support 10/100Mbit speeds on Gen3 + /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0 * at this time. */ - if (priv->chip_id == RCAR_GEN3) { + if (soc_device_match(r8a7795es10)) { err = phy_set_max_speed(phydev, SPEED_100); if (err) { netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); @@ -1920,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev) } } +/* Set tx and rx clock internal delay modes */ +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int set = 0; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) + set |= APSR_DM_RDM; + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + set |= APSR_DM_TDM; + + ravb_modify(ndev, APSR, APSR_DM, set); +} + static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -2032,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, @@ -2168,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + if (priv->chip_id != RCAR_GEN2) + ravb_set_delay_mode(ndev); + /* Restore descriptor base address table */ ravb_write(ndev, priv->desc_bat_dma, DBAT); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f729a6b43958..54248775f227 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,9 +1,9 @@ /* SuperH Ethernet device driver * - * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2016 Cogent Embedded, Inc. + * Copyright (C) 2013-2017 Cogent Embedded, Inc. 
* Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -518,12 +518,19 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xe77f009f, + .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP | + EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP | + EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .no_psr = 1, @@ -535,9 +542,8 @@ static struct sh_eth_cpu_data r7s72100_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, - .shift_rd0 = 1, }; static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) @@ -557,12 +563,19 @@ static struct sh_eth_cpu_data r8a7740_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000070f, .apr = 1, @@ -574,10 +587,10 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, - .hw_crc = 1, + .hw_checksum = 1, .tsu = 1, .select_mii = 1, - .shift_rd0 = 1, + .magic = 1, }; /* There is CPU dependent code */ @@ -604,12 +617,16 @@ static struct sh_eth_cpu_data r8a777x_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .apr = 1, @@ -625,14 +642,19 @@ static struct sh_eth_cpu_data r8a779x_data = { .register_type = SH_ETH_REG_FAST_RCAR, - .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, - .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | 
EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, .trscer_err_mask = DESC_I_RINT8, @@ -642,6 +664,7 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, + .magic = 1, }; #endif /* CONFIG_OF */ @@ -668,12 +691,16 @@ static struct sh_eth_cpu_data sh7724_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = 0x01ff009f, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -704,12 +731,18 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .irq_flags = IRQF_SHARED, .apr = 1, @@ -772,12 +805,19 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .fdr_value = 0x0000072f, .irq_flags = IRQF_SHARED, @@ -803,12 +843,18 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | - EESR_TDE | EESR_ECI, + EESR_TDE, .apr = 1, .mpr = 1, @@ -818,9 +864,9 @@ static struct sh_eth_cpu_data sh7734_data = 
{ .no_trimd = 1, .no_ade = 1, .tsu = 1, - .hw_crc = 1, + .hw_checksum = 1, .select_mii = 1, - .shift_rd0 = 1, + .magic = 1, }; /* SH7763 */ @@ -833,12 +879,17 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | - EESR_ECI, + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .apr = 1, .mpr = 1, @@ -849,12 +900,20 @@ static struct sh_eth_cpu_data sh7763_data = { .no_ade = 1, .tsu = 1, .irq_flags = IRQF_SHARED, + .magic = 1, }; static struct sh_eth_cpu_data sh7619_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .apr = 1, .mpr = 1, @@ -865,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = { static struct sh_eth_cpu_data sh771x_data = { .register_type = SH_ETH_REG_FAST_SH3_SH2, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP | + EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP | + EESIPR_CEEFIP | EESIPR_CELFIP | + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, }; @@ -936,7 +1002,7 @@ static int sh_eth_reset(struct net_device *ndev) sh_eth_write(ndev, 0x0, RDFFR); /* Reset HW CRC register */ - if (mdp->cd->hw_crc) + if (mdp->cd->hw_checksum) sh_eth_write(ndev, 0x0, CSMR); /* Select MII mode */ @@ -1421,7 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. 
*/ - if (mdp->cd->shift_rd0) + if (mdp->cd->hw_checksum) desc_status >>= 16; skb = mdp->rx_skbuff[entry]; @@ -1528,44 +1594,46 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev) sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } -/* error control function */ -static void sh_eth_error(struct net_device *ndev, u32 intr_status) +/* E-MAC interrupt handler */ +static void sh_eth_emac_interrupt(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); u32 felic_stat; u32 link_stat; - u32 mask; - if (intr_status & EESR_ECI) { - felic_stat = sh_eth_read(ndev, ECSR); - sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ - if (felic_stat & ECSR_ICD) - ndev->stats.tx_carrier_errors++; - if (felic_stat & ECSR_LCHNG) { - /* Link Changed */ - if (mdp->cd->no_psr || mdp->no_ether_link) { - goto ignore_link; - } else { - link_stat = (sh_eth_read(ndev, PSR)); - if (mdp->ether_link_active_low) - link_stat = ~link_stat; - } - if (!(link_stat & PHY_ST_LINK)) { - sh_eth_rcv_snd_disable(ndev); - } else { - /* Link Up */ - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); - /* clear int */ - sh_eth_modify(ndev, ECSR, 0, 0); - sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, - DMAC_M_ECI); - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - } + felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_MPD) + pm_wakeup_event(&mdp->pdev->dev, 0); + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) + return; + link_stat = sh_eth_read(ndev, PSR); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + if (!(link_stat & PHY_ST_LINK)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0); + /* clear int */ + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); } } +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 mask; -ignore_link: if (intr_status & EESR_TWB) { /* Unused write back interrupt */ if (intr_status & EESR_TABT) { /* Transmit Abort int */ @@ -1646,14 +1714,16 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) /* Get interrupt status */ intr_status = sh_eth_read(ndev, EESR); - /* Mask it with the interrupt mask, forcing ECI interrupt to be always - * enabled since it's the one that comes thru regardless of the mask, - * and we need to fully handle it in sh_eth_error() in order to quench - * it as it doesn't get cleared by just writing 1 to the ECI bit... + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_emac_interrupt() in order + * to quench it as it doesn't get cleared by just writing 1 to the ECI + * bit... 
*/ intr_enable = sh_eth_read(ndev, EESIPR); - intr_status &= intr_enable | DMAC_M_ECI; - if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) + intr_status &= intr_enable | EESIPR_ECIIP; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | + cd->eesr_err_check)) ret = IRQ_HANDLED; else goto out; @@ -1685,6 +1755,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) netif_wake_queue(ndev); } + /* E-MAC interrupt */ + if (intr_status & EESR_ECI) + sh_eth_emac_interrupt(ndev); + if (intr_status & cd->eesr_err_check) { /* Clear error interrupts */ sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); @@ -1989,7 +2063,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_reg(MAFCR); if (cd->rtrate) add_reg(RTRATE); - if (cd->hw_crc) + if (cd->hw_checksum) add_reg(CSMR); if (cd->select_mii) add_reg(RMII_MII); @@ -2201,6 +2275,33 @@ static int sh_eth_set_ringparam(struct net_device *ndev, return 0; } +static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (mdp->cd->magic && mdp->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); + + return 0; +} + static const struct ethtool_ops sh_eth_ethtool_ops = { .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, @@ -2215,6 +2316,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .set_ringparam = sh_eth_set_ringparam, .get_link_ksettings = sh_eth_get_link_ksettings, .set_link_ksettings = sh_eth_set_link_ksettings, + .get_wol = sh_eth_get_wol, + .set_wol = sh_eth_set_wol, }; /* network device open function */ @@ -3017,6 +3120,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + mdp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mdp->clk)) + mdp->clk = NULL; + ndev->base_addr = res->start; spin_lock_init(&mdp->lock); @@ -3111,6 +3219,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; + if (mdp->cd->magic && mdp->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* print device information */ netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -3150,15 +3261,67 @@ static int sh_eth_drv_remove(struct platform_device *pdev) #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP +static int sh_eth_wol_setup(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* Only allow ECI interrupts */ + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, EESIPR_ECIIP, EESIPR); + + /* Enable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(mdp->clk); + + return enable_irq_wake(ndev->irq); +} + +static int sh_eth_wol_restore(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + napi_enable(&mdp->napi); + + /* Disable MagicPacket */ + sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0); + + /* The device needs to be 
reset to restore MagicPacket logic + * for next wakeup. If we close and open the device it will + * both be reset and all registers restored. This is what + * happens during suspend and resume without WoL enabled. + */ + ret = sh_eth_close(ndev); + if (ret < 0) + return ret; + ret = sh_eth_open(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(mdp->clk); + + return disable_irq_wake(ndev->irq); +} + static int sh_eth_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (mdp->wol_enabled) + ret = sh_eth_wol_setup(ndev); + else ret = sh_eth_close(ndev); - } return ret; } @@ -3166,14 +3329,21 @@ static int sh_eth_suspend(struct device *dev) static int sh_eth_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); + struct sh_eth_private *mdp = netdev_priv(ndev); int ret = 0; - if (netif_running(ndev)) { + if (!netif_running(ndev)) + return 0; + + if (mdp->wol_enabled) + ret = sh_eth_wol_restore(ndev); + else ret = sh_eth_open(ndev); - if (ret < 0) - return ret; - netif_device_attach(ndev); - } + + if (ret < 0) + return ret; + + netif_device_attach(ndev); return ret; } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index d050f37f3e0f..a6753ccba711 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -265,22 +265,38 @@ enum EESR_BIT { EESR_RTO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ - EESR_TFE | EESR_TDE | EESR_ECI) + EESR_TFE | EESR_TDE) /* EESIPR */ -enum DMAC_IM_BIT { - DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000, - DMAC_M_RABT = 0x02000000, - DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000, - DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000, - DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000, - DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000, - DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800, - DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200, - DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080, - DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008, - DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002, - DMAC_M_RINT1 = 0x00000001, +enum EESIPR_BIT { + EESIPR_TWB1IP = 0x80000000, + EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */ + EESIPR_TC1IP = 0x20000000, + EESIPR_TUCIP = 0x10000000, + EESIPR_ROCIP = 0x08000000, + EESIPR_TABTIP = 0x04000000, + EESIPR_RABTIP = 0x02000000, + EESIPR_RFCOFIP = 0x01000000, + EESIPR_ADEIP = 0x00800000, + EESIPR_ECIIP = 0x00400000, + EESIPR_FTCIP = 0x00200000, /* same as TC0IP */ + EESIPR_TDEIP = 0x00100000, + EESIPR_TFUFIP = 0x00080000, + EESIPR_FRIP = 0x00040000, + EESIPR_RDEIP = 0x00020000, + EESIPR_RFOFIP = 0x00010000, + EESIPR_CNDIP = 0x00000800, + EESIPR_DLCIP = 0x00000400, + EESIPR_CDIP = 0x00000200, + EESIPR_TROIP = 0x00000100, + EESIPR_RMAFIP = 0x00000080, + EESIPR_CEEFIP = 0x00000040, + EESIPR_CELFIP = 0x00000020, + EESIPR_RRFIP = 0x00000010, + EESIPR_RTLFIP = 0x00000008, + EESIPR_RTSFIP = 0x00000004, + EESIPR_PREIP = 0x00000002, + EESIPR_CERFIP = 0x00000001, }; /* Receive descriptor 0 bits */ @@ -339,7 +355,7 @@ enum FELIC_MODE_BIT { ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, ECMR_TXF = 0x00010000, ECMR_MCT = 
0x00002000, ECMR_PRCEF = 0x00001000, - ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, + ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001, }; @@ -488,11 +504,11 @@ struct sh_eth_cpu_data { unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ - unsigned hw_crc:1; /* E-DMAC have CSMR */ + unsigned hw_checksum:1; /* E-DMAC has CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ - unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ }; struct sh_eth_private { @@ -501,6 +517,7 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; + struct clk *clk; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; @@ -529,6 +546,7 @@ struct sh_eth_private { unsigned no_ether_link:1; unsigned ether_link_active_low:1; unsigned is_opened:1; + unsigned wol_enabled:1; }; static inline void sh_eth_soft_swap(char *src, int len) diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 7c450b5a1138..0f63a44a955d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget) } if (credits < budget) - napi_complete(napi); + napi_complete_done(napi, credits); rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index cddcff5a00a7..d54490d3f7ad 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget) work_done = sxgbe_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); } @@ -1706,11 +1706,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) * This function is a driver entry point whenever ifconfig command gets * executed to see device statistics. Statistics are number of * bytes sent or received, errors occurred etc. - * Return value: - * This function returns various statistical information of device. */ -static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void sxgbe_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct sxgbe_priv_data *priv = netdev_priv(dev); void __iomem *ioaddr = priv->ioaddr; @@ -1761,8 +1759,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, SXGBE_MMC_TXUFLWHI_GBCNT_REG); writel(0, ioaddr + SXGBE_MMC_CTL_REG); spin_unlock(&priv->stats_lock); - - return stats; } /* sxgbe_set_features - entry point to set offload features of the device. 
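The sh_eth hunks above add Magic Packet Wake-on-LAN: controllers whose sh_eth_cpu_data sets the new magic flag (and that expose a clock) report WAKE_MAGIC through ethtool, and suspend arms ECMR.MPDE with only the E-MAC interrupt left enabled instead of closing the device. A condensed sketch of the ethtool pair under those assumptions; the example_* names and the has_magic and dev fields are placeholders:

static void example_get_wol(struct net_device *ndev,
			    struct ethtool_wolinfo *wol)
{
	struct example_priv *priv = netdev_priv(ndev);

	wol->supported = priv->has_magic ? WAKE_MAGIC : 0;
	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}

static int example_set_wol(struct net_device *ndev,
			   struct ethtool_wolinfo *wol)
{
	struct example_priv *priv = netdev_priv(ndev);

	/* Only Magic Packet wakeup is supported in this sketch. */
	if (!priv->has_magic || (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
	device_set_wakeup_enable(priv->dev, priv->wol_enabled);

	return 0;
}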
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 5eb0e684fd76..0475f1831b92 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -60,15 +60,33 @@ struct efx_ef10_vlan { u16 vid; }; +enum efx_ef10_default_filters { + EFX_EF10_BCAST, + EFX_EF10_UCDEF, + EFX_EF10_MCDEF, + EFX_EF10_VXLAN4_UCDEF, + EFX_EF10_VXLAN4_MCDEF, + EFX_EF10_VXLAN6_UCDEF, + EFX_EF10_VXLAN6_MCDEF, + EFX_EF10_NVGRE4_UCDEF, + EFX_EF10_NVGRE4_MCDEF, + EFX_EF10_NVGRE6_UCDEF, + EFX_EF10_NVGRE6_MCDEF, + EFX_EF10_GENEVE4_UCDEF, + EFX_EF10_GENEVE4_MCDEF, + EFX_EF10_GENEVE6_UCDEF, + EFX_EF10_GENEVE6_MCDEF, + + EFX_EF10_NUM_DEFAULT_FILTERS +}; + /* Per-VLAN filters information */ struct efx_ef10_filter_vlan { struct list_head list; u16 vid; u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; - u16 ucdef; - u16 bcast; - u16 mcdef; + u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; }; struct efx_ef10_dev_addr { @@ -78,7 +96,7 @@ struct efx_ef10_dev_addr { struct efx_ef10_filter_table { /* The MCDI match masks supported by this fw & hw, in order of priority */ u32 rx_match_mcdi_flags[ - MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; unsigned int rx_match_count; struct { @@ -197,11 +215,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); - if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { nic_data->datapath_caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2); - else + nic_data->piobuf_size = MCDI_WORD(outbuf, + GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); + } else { nic_data->datapath_caps2 = 0; + nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; + } /* record the DPCPU firmware IDs to determine VEB vswitching support. */ @@ -547,7 +569,6 @@ static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; - struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. However, until we @@ -637,7 +658,6 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc < 0) goto fail5; efx->port_num = rc; - net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) @@ -825,8 +845,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) offset = ((efx->tx_channel_offset + efx->n_tx_channels - tx_queue->channel->channel - 1) * efx_piobuf_size); - index = offset / ER_DZ_TX_PIOBUF_SIZE; - offset = offset % ER_DZ_TX_PIOBUF_SIZE; + index = offset / nic_data->piobuf_size; + offset = offset % nic_data->piobuf_size; /* When the host page size is 4K, the first * host page in the WC mapping may be within @@ -1161,14 +1181,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * functions of the controller. 
*/ if (efx_piobuf_size != 0 && - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= efx->n_tx_channels) { unsigned int n_piobufs = DIV_ROUND_UP(efx->n_tx_channels, - ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + nic_data->piobuf_size / efx_piobuf_size); rc = efx_ef10_alloc_piobufs(efx, n_piobufs); - if (rc) + if (rc == -ENOSPC) + netif_dbg(efx, probe, efx->net_dev, + "out of PIO buffers; cannot allocate more\n"); + else if (rc == -EPERM) + netif_dbg(efx, probe, efx->net_dev, + "not permitted to allocate PIO buffers\n"); + else if (rc) netif_err(efx, probe, efx->net_dev, "failed to allocate PIO buffers (%d)\n", rc); else @@ -1315,15 +1341,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx) efx_ef10_free_piobufs(efx); } - /* Log an error on failure, but this is non-fatal */ - if (rc) + /* Log an error on failure, but this is non-fatal. + * Permission errors are less important - we've presumably + * had the PIO buffer licence removed. + */ + if (rc == -EPERM) + netif_dbg(efx, drv, efx->net_dev, + "not permitted to restore PIO buffers\n"); + else if (rc) netif_err(efx, drv, efx->net_dev, "failed to restore PIO buffers (%d)\n", rc); nic_data->must_restore_piobufs = false; } /* don't fail init if RSS setup doesn't work */ - rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); + rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = (rc == 0); return 0; @@ -2360,7 +2392,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) /* Create TX descriptor ring entry */ if (buffer->flags & EFX_TX_BUF_OPTION) { *txd = buffer->option; + if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) + /* PIO descriptor */ + tx_queue->packet_write_count = tx_queue->write_count; } else { + tx_queue->packet_write_count = tx_queue->write_count; BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_3( *txd, @@ -2529,7 +2565,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) } static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); @@ -2540,6 +2576,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + /* This iterates over the length of efx->rx_indir_table, but copies + * bytes from rx_indir_table. That's because the latter is a pointer + * rather than an array, but should have the same length. + * The efx->rx_hash_key loop below is similar. 
+ */ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = @@ -2555,8 +2596,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) - MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = - efx->rx_hash_key[i]; + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, sizeof(keybuf), NULL, 0, NULL); @@ -2589,7 +2629,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, } static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; @@ -2608,7 +2649,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, } rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, - rx_indir_table); + rx_indir_table, key); if (rc != 0) goto fail2; @@ -2619,6 +2660,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, if (rx_indir_table != efx->rx_indir_table) memcpy(efx->rx_indir_table, rx_indir_table, sizeof(efx->rx_indir_table)); + if (key != efx->rx_hash_key) + memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); + return 0; fail2: @@ -2629,15 +2673,69 @@ fail1: return rc; } +static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); + size_t outlen; + int rc, i; + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != + MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + return -ENOENT; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), + tablebuf, sizeof(tablebuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = MCDI_PTR(tablebuf, + RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, + nic_data->rx_rss_context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), + keybuf, sizeof(keybuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + efx->rx_hash_key[i] = MCDI_PTR( + keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; + + return 0; +} + static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, + const u8 *key) { int rc; if (efx->rss_spread == 1) return 0; - rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); + if (!key) + key = efx->rx_hash_key; + + rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); if (rc 
== -ENOBUFS && !user) { unsigned context_size; @@ -2675,6 +2773,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table + __attribute__ ((unused)), + const u8 *key __attribute__ ((unused))) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -3510,6 +3610,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, table->entry[filter_idx].spec = (unsigned long)spec | flags; } +static void +efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf) +{ + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); + u32 match_fields = 0, uc_match, mc_match; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ +#define COPY_VALUE(value, mcdi_field) \ + do { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(value)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &value, sizeof(value)); \ + } while (0) +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + COPY_VALUE(spec->gen_field, mcdi_field); \ + } + /* Handle encap filters first. They will always be mismatch + * (unknown UC or MC) filters + */ + if (encap_type) { + /* ether_type and outer_ip_proto need to be variables + * because COPY_VALUE wants to memcpy them + */ + __be16 ether_type = + htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? + ETH_P_IPV6 : ETH_P_IP); + u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; + u8 outer_ip_proto; + + switch (encap_type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; + /* fallthrough */ + case EFX_ENCAP_TYPE_GENEVE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_UDP; + COPY_VALUE(outer_ip_proto, IP_PROTO); + /* We always need to set the type field, even + * though we're not matching on the TNI. + */ + MCDI_POPULATE_DWORD_1(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + vni_type); + break; + case EFX_ENCAP_TYPE_NVGRE: + COPY_VALUE(ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_GRE; + COPY_VALUE(outer_ip_proto, IP_PROTO); + break; + default: + WARN_ON(1); + } + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? 
+ 1 << mc_match : + 1 << uc_match; + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD +#undef COPY_VALUE + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); +} + static void efx_ef10_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, @@ -3518,7 +3716,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, struct efx_ef10_nic_data *nic_data = efx->nic_data; u32 flags = spec->flags; - memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); /* Remove RSS flag if we don't have an RSS context. */ if (flags & EFX_FILTER_FLAG_RX_RSS && @@ -3531,46 +3729,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_REPLACE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); } else { - u32 match_fields = 0; - - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, - efx_ef10_filter_is_exclusive(spec) ? - MC_CMD_FILTER_OP_IN_OP_INSERT : - MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); - - /* Convert match flags and values. Unlike almost - * everything else in MCDI, these fields are in - * network byte order. - */ - if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) - match_fields |= - is_multicast_ether_addr(spec->loc_mac) ? - 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : - 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; -#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ - if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ - match_fields |= \ - 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN; \ - BUILD_BUG_ON( \ - MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ - sizeof(spec->gen_field)); \ - memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ - &spec->gen_field, sizeof(spec->gen_field)); \ - } - COPY_FIELD(REM_HOST, rem_host, SRC_IP); - COPY_FIELD(LOC_HOST, loc_host, DST_IP); - COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); - COPY_FIELD(REM_PORT, rem_port, SRC_PORT); - COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); - COPY_FIELD(LOC_PORT, loc_port, DST_PORT); - COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); - COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); - COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); - COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); -#undef COPY_FIELD - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, - match_fields); + efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); } MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); @@ -3599,8 +3758,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, bool replacing) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); int rc; efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); @@ -3615,37 +3774,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx, static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); unsigned int match_flags = spec->match_flags; 
+ unsigned int uc_match, mc_match; u32 mcdi_flags = 0; - if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { - match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; - mcdi_flags |= - is_multicast_ether_addr(spec->loc_mac) ? - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : - (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); - } - -#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ - unsigned int old_match_flags = match_flags; \ +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ + unsigned int old_match_flags = match_flags; \ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ if (match_flags != old_match_flags) \ mcdi_flags |= \ - (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + (1 << ((encap) ? \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ + mcdi_field ## _LBN)); \ } - MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); - MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); - MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); - MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); - MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); - MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); - MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); + /* inner or outer based on encap type */ + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); + /* always outer */ + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); #undef MAP_FILTER_TO_MCDI_FLAG + /* special handling for encap type, and mismatch */ + if (encap_type) { + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; + mcdi_flags |= + (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + } + /* Did we map them all? 
*/ WARN_ON_ONCE(match_flags); @@ -4305,29 +4485,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, #endif /* CONFIG_RFS_ACCEL */ -static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) { int match_flags = 0; -#define MAP_FLAG(gen_flag, mcdi_field) { \ +#define MAP_FLAG(gen_flag, mcdi_field) do { \ u32 old_mcdi_flags = mcdi_flags; \ - mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ - mcdi_field ## _LBN); \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ if (mcdi_flags != old_mcdi_flags) \ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } while (0) + + if (encap) { + /* encap filters must specify encap type */ + match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + /* and imply ethertype and ip proto */ + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + /* VLAN tags refer to the outer packet */ + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + /* everything else refers to the inner packet */ + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, IFRM_SRC_IP); + MAP_FLAG(LOC_HOST, IFRM_DST_IP); + MAP_FLAG(REM_MAC, IFRM_SRC_MAC); + MAP_FLAG(REM_PORT, IFRM_SRC_PORT); + MAP_FLAG(LOC_MAC, IFRM_DST_MAC); + MAP_FLAG(LOC_PORT, IFRM_DST_PORT); + MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); + MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); + } else { + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); } - MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); - MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); - MAP_FLAG(REM_HOST, SRC_IP); - MAP_FLAG(LOC_HOST, DST_IP); - MAP_FLAG(REM_MAC, SRC_MAC); - MAP_FLAG(REM_PORT, SRC_PORT); - MAP_FLAG(LOC_MAC, DST_MAC); - MAP_FLAG(LOC_PORT, DST_PORT); - MAP_FLAG(ETHER_TYPE, ETHER_TYPE); - MAP_FLAG(INNER_VID, INNER_VLAN); - MAP_FLAG(OUTER_VID, OUTER_VLAN); - MAP_FLAG(IP_PROTO, IP_PROTO); #undef MAP_FLAG /* Did we map them all? 
*/ @@ -4354,6 +4559,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) } static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + bool encap, enum efx_filter_match_flags match_flags) { unsigned int match_pri; @@ -4362,7 +4568,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) { - mf = efx_ef10_filter_match_flags_from_mcdi( + mf = efx_ef10_filter_match_flags_from_mcdi(encap, table->rx_match_mcdi_flags[match_pri]); if (mf == match_flags) return true; @@ -4371,39 +4577,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, return false; } -static int efx_ef10_filter_table_probe(struct efx_nic *efx) +static int +efx_ef10_filter_table_probe_matches(struct efx_nic *efx, + struct efx_ef10_filter_table *table, + bool encap) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; - struct efx_ef10_filter_table *table; - struct efx_ef10_vlan *vlan; size_t outlen; int rc; - if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) - return -EINVAL; - - if (efx->filter_state) /* already probed */ - return 0; - - table = kzalloc(sizeof(*table), GFP_KERNEL); - if (!table) - return -ENOMEM; - /* Find out which RX filter types are supported, and their priorities */ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; + pd_match_count = MCDI_VAR_ARRAY_LEN( outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); - table->rx_match_count = 0; for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { u32 mcdi_flags = @@ -4411,7 +4608,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) outbuf, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, pd_match_pri); - rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); if (rc < 0) { netif_dbg(efx, probe, efx->net_dev, "%s: fw flags %#x pri %u not supported in driver\n", @@ -4426,10 +4623,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) } } + return 0; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; + int rc; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->rx_match_count = 0; + rc = efx_ef10_filter_table_probe_matches(efx, table, false); + if (rc) + goto fail; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + rc = efx_ef10_filter_table_probe_matches(efx, table, true); + if (rc) + goto fail; if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && - !(efx_ef10_filter_match_supported(table, + !(efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && - 
efx_ef10_filter_match_supported(table, + efx_ef10_filter_match_supported(table, false, (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { netif_info(efx, probe, net_dev, "VLAN filters are not supported in this firmware variant\n"); @@ -4475,10 +4702,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int invalid_filters = 0, failed = 0; + struct efx_ef10_filter_vlan *vlan; struct efx_filter_spec *spec; unsigned int filter_idx; - bool failed = false; - int rc; + u32 mcdi_flags; + int match_pri; + int rc, i; WARN_ON(!rwsem_is_locked(&efx->filter_sem)); @@ -4495,6 +4725,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) if (!spec) continue; + mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); + match_pri = 0; + while (match_pri < table->rx_match_count && + table->rx_match_mcdi_flags[match_pri] != mcdi_flags) + ++match_pri; + if (match_pri >= table->rx_match_count) { + invalid_filters++; + goto not_restored; + } + if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && + spec->rss_context != nic_data->rx_rss_context) + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with specific RSS context.\n"); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; spin_unlock_bh(&efx->filter_lock); @@ -4502,10 +4746,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) &table->entry[filter_idx].handle, false); if (rc) - failed = true; - + failed++; spin_lock_bh(&efx->filter_lock); + if (rc) { +not_restored: + list_for_each_entry(vlan, &table->vlan_list, list) + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) + if (vlan->default_filters[i] == filter_idx) + vlan->default_filters[i] = + EFX_EF10_FILTER_ID_INVALID; + kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { @@ -4516,9 +4767,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) spin_unlock_bh(&efx->filter_lock); + /* This can happen validly if the MC's capabilities have changed, so + * is not an error. + */ + if (invalid_filters) + netif_dbg(efx, drv, efx->net_dev, + "Did not restore %u filters that are now unsupported.\n", + invalid_filters); + if (failed) netif_err(efx, hw, efx->net_dev, - "unable to restore all filters\n"); + "unable to restore %u filters\n", failed); else nic_data->must_restore_filters = false; } @@ -4596,9 +4855,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); - efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); - efx_ef10_filter_mark_one_old(efx, &vlan->bcast); - efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); } /* Mark old filters that may need to be removed. 
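The loop that now closes _efx_ef10_filter_vlan_mark_old() is the payoff of the default_filters[] conversion at the top of this file: once ucdef, bcast and mcdef become slots in a single enum-indexed array alongside the new VXLAN/NVGRE/GENEVE entries, the mark/remove bookkeeping can iterate instead of naming each ID, and the restore path above can invalidate any slot via its not_restored label. A reduced, standalone sketch of the data-structure shape — driver context stripped, and the sentinel value assumed rather than taken from this patch:

#include <stdint.h>

#define FILTER_ID_INVALID 0xffff	/* sentinel; the driver's actual value is assumed */

enum default_filters {
	BCAST,	/* kept at 0 so an unpopulated map entry can double as
		 * "no slot", which the bounds checks further down exploit */
	UCDEF,
	MCDEF,
	VXLAN4_UCDEF,
	VXLAN4_MCDEF,
	/* ... one slot per encap type and IP version, as in the patch ... */
	NUM_DEFAULT_FILTERS
};

struct filter_vlan {
	uint16_t default_filters[NUM_DEFAULT_FILTERS];
};

/* Before this patch: three explicit mark calls (ucdef, bcast, mcdef),
 * each a chance to miss a slot as new defaults appear.  After: one loop.
 */
static void mark_defaults_old(struct filter_vlan *vlan,
			      void (*mark_one)(uint16_t *id))
{
	int i;

	for (i = 0; i < NUM_DEFAULT_FILTERS; i++)
		mark_one(&vlan->default_filters[i]);
}
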
@@ -4716,6 +4974,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ + EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, vlan->vid, baddr); @@ -4732,9 +4992,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, } return rc; } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } @@ -4743,6 +5002,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_ef10_filter_vlan *vlan, + enum efx_encap_type encap_type, bool multicast, bool rollback) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -4750,6 +5010,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; + u16 *id; filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; @@ -4760,19 +5021,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, else efx_filter_set_uc_def(&spec); + if (encap_type) { + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) + efx_filter_set_encap_type(&spec, encap_type); + else + /* don't insert encap filters on non-supporting + * platforms. ID will be left as INVALID. + */ + return 0; + } + if (vlan->vid != EFX_FILTER_VID_UNSPEC) efx_filter_set_eth_local(&spec, vlan->vid, NULL); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, - efx->net_dev, - "%scast mismatch filter insert failed rc=%d\n", - multicast ? "Multi" : "Uni", rc); + const char *um = multicast ? "Multicast" : "Unicast"; + const char *encap_name = ""; + const char *encap_ipv = ""; + + if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_VXLAN) + encap_name = "VXLAN "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_NVGRE) + encap_name = "NVGRE "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_GENEVE) + encap_name = "GENEVE "; + if (encap_type & EFX_ENCAP_FLAG_IPV6) + encap_ipv = "IPv6 "; + else if (encap_type) + encap_ipv = "IPv4 "; + + /* unprivileged functions can't insert mismatch filters + * for encapsulated or unicast traffic, so downgrade + * those warnings to debug. 
+ */ + netif_cond_dbg(efx, drv, efx->net_dev, + rc == -EPERM && (encap_type || !multicast), warn, + "%s%s%s mismatch filter insert failed rc=%d\n", + encap_name, encap_ipv, um, rc); } else if (multicast) { - EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); - vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); - if (!nic_data->workaround_26807) { + /* mapping from encap types to default filter IDs (multicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_MCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_MCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_MCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -4787,20 +5104,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - vlan->mcdef); - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + *id); + *id = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - EFX_WARN_ON_PARANOID(vlan->bcast != - EFX_EF10_FILTER_ID_INVALID); - vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID( + vlan->default_filters[EFX_EF10_BCAST] != + EFX_EF10_FILTER_ID_INVALID); + vlan->default_filters[EFX_EF10_BCAST] = + efx_ef10_filter_get_unsafe_id(efx, rc); } } rc = 0; } else { - EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID); - vlan->ucdef = rc; + /* mapping from encap types to default filter IDs (unicast) */ + static enum efx_ef10_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_VXLAN6_UCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_NVGRE6_UCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_EF10_GENEVE6_UCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_EF10_BCAST != 0); + if (encap_type > ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); + *id = rc; rc = 0; } return rc; @@ -4923,7 +5264,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Insert/renew unicast filters */ if (table->uc_promisc) { - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, + false, false); efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of the filters failed to insert, fall back to @@ -4931,8 +5273,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, * our 
individual unicast filters. */ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) - efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + false, false); } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + false, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + false, false); /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove @@ -4946,7 +5305,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); efx_ef10_filter_insert_addr_list(efx, vlan, @@ -4956,7 +5317,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* If we failed to insert promiscuous filters, don't * rollback. Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, false); efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { @@ -4969,11 +5332,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + if (efx_ef10_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + true, false); + efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + true, false); } /* Caller must hold efx->filter_sem for read if race against @@ -5060,9 +5440,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; - vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; - vlan->bcast = EFX_EF10_FILTER_ID_INVALID; - vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; list_add_tail(&vlan->list, &table->vlan_list); @@ -5089,9 +5468,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mc[i]); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); - 
efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); - efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) + if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->default_filters[i]); kfree(vlan); } @@ -5540,6 +5920,20 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +static int efx_ef10_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} + static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) { if (proto != htons(ETH_P_8021Q)) @@ -5609,6 +6003,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5647,11 +6042,11 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, .vswitching_remove = efx_ef10_vswitching_remove_vf, - .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, #endif .get_mac_address = efx_ef10_get_mac_address_vf, .set_mac_address = efx_ef10_set_mac_address, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, @@ -5666,6 +6061,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; const struct efx_nic_type efx_hunt_a0_nic_type = { @@ -5716,6 +6112,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -5776,6 +6173,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .set_mac_address = efx_ef10_set_mac_address, .tso_versions = efx_ef10_tso_versions, + .get_phys_port_id = efx_ef10_get_phys_port_id, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, @@ -5783,6 +6181,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, + .option_descriptors = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, @@ -5790,4 +6189,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, }; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index a949b9d27329..ed4b14283461 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -6,6 +6,7 @@ * 
under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ +#include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/module.h> #include "net_driver.h" @@ -554,7 +555,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) return 0; fail: - memset(vf->mac, 0, ETH_ALEN); + eth_zero_addr(vf->mac); return rc; } @@ -760,17 +761,3 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, return 0; } - -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid) -{ - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - if (!is_valid_ether_addr(nic_data->port_id)) - return -EOPNOTSUPP; - - ppid->id_len = ETH_ALEN; - memcpy(ppid->id, nic_data->port_id, ppid->id_len); - - return 0; -} diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 9ceb7ef0a210..2aa444ed42de 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -56,9 +56,6 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, int link_state); -int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); - int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 5a5dcad8c49a..fcd4eeecfef4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -308,9 +308,6 @@ static int efx_poll(struct napi_struct *napi, int budget) struct efx_nic *efx = channel->efx; int spent; - if (!efx_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -331,11 +328,10 @@ static int efx_poll(struct napi_struct *napi, int budget) * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
*/ - napi_complete(napi); + napi_complete_done(napi, spent); efx_nic_eventq_read_ack(channel); } - efx_channel_unlock_napi(channel); return spent; } @@ -391,7 +387,6 @@ void efx_start_eventq(struct efx_channel *channel) channel->enabled = true; smp_wmb(); - efx_channel_enable(channel); napi_enable(&channel->napi_str); efx_nic_eventq_read_ack(channel); } @@ -403,8 +398,6 @@ void efx_stop_eventq(struct efx_channel *channel) return; napi_disable(&channel->napi_str); - while (!efx_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -2088,7 +2081,6 @@ static void efx_init_napi_channel(struct efx_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); - efx_channel_busy_poll_init(channel); } static void efx_init_napi(struct efx_nic *efx) @@ -2138,37 +2130,6 @@ static void efx_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int efx_busy_poll(struct napi_struct *napi) -{ - struct efx_channel *channel = - container_of(napi, struct efx_channel, napi_str); - struct efx_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!efx_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - efx_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. - */ - efx_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2219,16 +2180,14 @@ int efx_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. 
*/ @@ -2336,6 +2295,27 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } +static int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + +static int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} + static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2376,15 +2356,13 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, - .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif + .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif .ndo_setup_tc = efx_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = efx_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif @@ -3585,3 +3563,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); +MODULE_VERSION(EFX_DRIVER_VERSION); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 18ebaea44e82..adddf70780ad 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1278,15 +1278,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) return (efx->n_rx_channels == 1) ? 
0 : ARRAY_SIZE(efx->rx_indir_table); } +static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (indir) memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + if (key) + memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size); return 0; } @@ -1295,14 +1309,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, { struct efx_nic *efx = netdev_priv(net_dev); - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + if (!indir && !key) return 0; - return efx->type->rx_push_rss_config(efx, true, indir); + if (!key) + key = efx->rx_hash_key; + if (!indir) + indir = efx->rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); } static int efx_ethtool_get_ts_info(struct net_device *net_dev, @@ -1377,6 +1395,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, .get_ts_info = efx_ethtool_get_ts_info, diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 5c5cb3c4c12e..f5e5cd1659a1 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -304,9 +304,6 @@ static int ef4_poll(struct napi_struct *napi, int budget) struct ef4_nic *efx = channel->efx; int spent; - if (!ef4_channel_lock_napi(channel)) - return budget; - netif_vdbg(efx, intr, efx->net_dev, "channel %d NAPI poll executing on CPU %d\n", channel->channel, raw_smp_processor_id()); @@ -327,11 +324,10 @@ static int ef4_poll(struct napi_struct *napi, int budget) * since ef4_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. */ - napi_complete(napi); + napi_complete_done(napi, spent); ef4_nic_eventq_read_ack(channel); } - ef4_channel_unlock_napi(channel); return spent; } @@ -387,7 +383,6 @@ void ef4_start_eventq(struct ef4_channel *channel) channel->enabled = true; smp_wmb(); - ef4_channel_enable(channel); napi_enable(&channel->napi_str); ef4_nic_eventq_read_ack(channel); } @@ -399,8 +394,6 @@ void ef4_stop_eventq(struct ef4_channel *channel) return; napi_disable(&channel->napi_str); - while (!ef4_channel_disable(channel)) - usleep_range(1000, 20000); channel->enabled = false; } @@ -986,7 +979,7 @@ void ef4_mac_reconfigure(struct ef4_nic *efx) /* Push loopback/power/transmit disable settings to the PHY, and reconfigure * the MAC appropriately. All other PHY configuration changes are pushed - * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC * through ef4_monitor(). 
* * Callers must hold the mac_lock @@ -2029,7 +2022,6 @@ static void ef4_init_napi_channel(struct ef4_channel *channel) channel->napi_dev = efx->net_dev; netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, napi_weight); - ef4_channel_busy_poll_init(channel); } static void ef4_init_napi(struct ef4_nic *efx) @@ -2079,37 +2071,6 @@ static void ef4_netpoll(struct net_device *net_dev) #endif -#ifdef CONFIG_NET_RX_BUSY_POLL -static int ef4_busy_poll(struct napi_struct *napi) -{ - struct ef4_channel *channel = - container_of(napi, struct ef4_channel, napi_str); - struct ef4_nic *efx = channel->efx; - int budget = 4; - int old_rx_packets, rx_packets; - - if (!netif_running(efx->net_dev)) - return LL_FLUSH_FAILED; - - if (!ef4_channel_try_lock_poll(channel)) - return LL_FLUSH_BUSY; - - old_rx_packets = channel->rx_queue.rx_packets; - ef4_process_channel(channel, budget); - - rx_packets = channel->rx_queue.rx_packets - old_rx_packets; - - /* There is no race condition with NAPI here. - * NAPI will automatically be rescheduled if it yielded during busy - * polling, because it was not able to take the lock and thus returned - * the full budget. - */ - ef4_channel_unlock_poll(channel); - - return rx_packets; -} -#endif - /************************************************************************** * * Kernel net device interface @@ -2158,16 +2119,14 @@ int ef4_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) +static void ef4_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct ef4_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); - - return stats; } /* Context: netif_tx_lock held, BHs disabled. */ @@ -2291,9 +2250,6 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_poll_controller = ef4_netpoll, #endif .ndo_setup_tc = ef4_setup_tc, -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = ef4_busy_poll, -#endif #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = ef4_filter_rfs, #endif @@ -3348,3 +3304,4 @@ MODULE_AUTHOR("Solarflare Communications and " MODULE_DESCRIPTION("Solarflare Falcon network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ef4_pci_table); +MODULE_VERSION(EF4_DRIVER_VERSION); diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 8e1929b01a32..56049157a5af 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -115,44 +115,47 @@ static int ef4_ethtool_phys_id(struct net_device *net_dev, } /* This must be called with rtnl_lock held. 
*/ -static int ef4_ethtool_get_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); - efx->phy_op->get_settings(efx, ecmd); + efx->phy_op->get_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); /* Both MACs support pause frames (bidirectional and respond-only) */ - ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); if (LOOPBACK_INTERNAL(efx)) { - ethtool_cmd_speed_set(ecmd, link_state->speed); - ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. */ -static int ef4_ethtool_set_settings(struct net_device *net_dev, - struct ethtool_cmd *ecmd) +static int +ef4_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { struct ef4_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ - if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && - (ecmd->duplex != DUPLEX_FULL)) { + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); - rc = efx->phy_op->set_settings(efx, ecmd); + rc = efx->phy_op->set_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); return rc; } @@ -1310,8 +1313,6 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev, } const struct ethtool_ops ef4_ethtool_ops = { - .get_settings = ef4_ethtool_get_settings, - .set_settings = ef4_ethtool_set_settings, .get_drvinfo = ef4_ethtool_get_drvinfo, .get_regs_len = ef4_ethtool_get_regs_len, .get_regs = ef4_ethtool_get_regs, @@ -1340,4 +1341,6 @@ const struct ethtool_ops ef4_ethtool_ops = { .set_rxfh = ef4_ethtool_set_rxfh, .get_module_info = ef4_ethtool_get_module_info, .get_module_eeprom = ef4_ethtool_get_module_eeprom, + .get_link_ksettings = ef4_ethtool_get_link_ksettings, + .set_link_ksettings = ef4_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.c b/drivers/net/ethernet/sfc/falcon/mdio_10g.c index e7d7c09296aa..ee0713f03d01 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.c +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.c @@ -226,33 +226,45 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, } /** - * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO. + * ef4_mdio_set_link_ksettings - Set (some of) the PHY settings over MDIO. 
* @efx: Efx NIC - * @ecmd: New settings + * @cmd: New settings */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET }; - - efx->phy_op->get_settings(efx, &prev); - - if (ecmd->advertising == prev.advertising && - ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) && - ecmd->duplex == prev.duplex && - ecmd->port == prev.port && - ecmd->autoneg == prev.autoneg) + struct ethtool_link_ksettings prev = { + .base.cmd = ETHTOOL_GLINKSETTINGS + }; + u32 prev_advertising, advertising; + u32 prev_supported; + + efx->phy_op->get_link_ksettings(efx, &prev); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_advertising, + prev.link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&prev_supported, + prev.link_modes.supported); + + if (advertising == prev_advertising && + cmd->base.speed == prev.base.speed && + cmd->base.duplex == prev.base.duplex && + cmd->base.port == prev.base.port && + cmd->base.autoneg == prev.base.autoneg) return 0; /* We can only change these settings for -T PHYs */ - if (prev.port != PORT_TP || ecmd->port != PORT_TP) + if (prev.base.port != PORT_TP || cmd->base.port != PORT_TP) return -EINVAL; /* Check that PHY supports these settings */ - if (!ecmd->autoneg || - (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported) + if (!cmd->base.autoneg || + (advertising | SUPPORTED_Autoneg) & ~prev_supported) return -EINVAL; - ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg); + ef4_link_set_advertising(efx, advertising | ADVERTISED_Autoneg); ef4_mdio_an_reconfigure(efx); return 0; } diff --git a/drivers/net/ethernet/sfc/falcon/mdio_10g.h b/drivers/net/ethernet/sfc/falcon/mdio_10g.h index 885cf7a834a6..53cb5cc4ad37 100644 --- a/drivers/net/ethernet/sfc/falcon/mdio_10g.h +++ b/drivers/net/ethernet/sfc/falcon/mdio_10g.h @@ -83,7 +83,8 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power, unsigned int mmd_mask); /* Set (some of) the PHY settings over MDIO */ -int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); +int ef4_mdio_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); /* Push advertising flags and restart autonegotiation */ void ef4_mdio_an_reconfigure(struct ef4_nic *efx); diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h index 210b28f7d2a1..37a8bdf32206 100644 --- a/drivers/net/ethernet/sfc/falcon/net_driver.h +++ b/drivers/net/ethernet/sfc/falcon/net_driver.h @@ -448,131 +448,6 @@ struct ef4_channel { struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum ef4_channel_busy_poll_state { - EF4_CHANNEL_STATE_IDLE = 0, - EF4_CHANNEL_STATE_NAPI = BIT(0), - EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EF4_CHANNEL_STATE_NAPI_REQ = BIT(1), - EF4_CHANNEL_STATE_POLL_BIT = 2, - EF4_CHANNEL_STATE_POLL = BIT(2), - EF4_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. 
*/ -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EF4_CHANNEL_STATE_POLL: - /* Ensure ef4_channel_try_lock_poll() wont starve us */ - set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EF4_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ - /* Make sure write has completed from ef4_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE); -} - -/* Called from ef4_busy_poll(). */ -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE, - EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ - clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. - */ -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in ef4_channel_busy_polling() */ - return !ef4_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_lock_napi(struct ef4_channel *channel) -{ - return true; -} - -static inline void ef4_channel_unlock_napi(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_unlock_poll(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_busy_polling(struct ef4_channel *channel) -{ - return false; -} - -static inline void ef4_channel_enable(struct ef4_channel *channel) -{ -} - -static inline bool ef4_channel_disable(struct ef4_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct ef4_msi_context - Context for each MSI * @efx: The associated NIC @@ -684,8 +559,8 @@ static inline bool ef4_link_state_equal(const struct ef4_link_state *left, * @reconfigure: Reconfigure PHY (e.g. for new link parameters) * @poll: Update @link_state and report whether it changed. * Serialised by the mac_lock. - * @get_settings: Get ethtool settings. Serialised by the mac_lock. - * @set_settings: Set ethtool settings. Serialised by the mac_lock. + * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock. + * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock. 
* @set_npage_adv: Set abilities advertised in (Extended) Next Page * (only needed where AN bit is set in mmds) * @test_alive: Test that PHY is 'alive' (online) @@ -700,10 +575,10 @@ struct ef4_phy_operations { void (*remove) (struct ef4_nic *efx); int (*reconfigure) (struct ef4_nic *efx); bool (*poll) (struct ef4_nic *efx); - void (*get_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); - int (*set_settings) (struct ef4_nic *efx, - struct ethtool_cmd *ecmd); + void (*get_link_ksettings)(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd); + int (*set_link_ksettings)(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd); void (*set_npage_adv) (struct ef4_nic *efx, u32); int (*test_alive) (struct ef4_nic *efx); const char *(*test_name) (struct ef4_nic *efx, unsigned int index); diff --git a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c index d29331652548..f5e0f18d4ea8 100644 --- a/drivers/net/ethernet/sfc/falcon/qt202x_phy.c +++ b/drivers/net/ethernet/sfc/falcon/qt202x_phy.c @@ -437,9 +437,10 @@ static int qt202x_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void qt202x_phy_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } static void qt202x_phy_remove(struct ef4_nic *efx) @@ -487,8 +488,8 @@ const struct ef4_phy_operations falcon_qt202x_phy_ops = { .poll = qt202x_phy_poll, .fini = ef4_port_dummy_op_void, .remove = qt202x_phy_remove, - .get_settings = qt202x_phy_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = qt202x_phy_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .get_module_eeprom = qt202x_phy_get_module_eeprom, .get_module_info = qt202x_phy_get_module_info, diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 250458cbdb4d..6a8406dc0c2b 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -674,8 +674,7 @@ void __ef4_rx_packet(struct ef4_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EF4_RX_PKT_CSUMMED; - if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb && - !ef4_channel_busy_polling(channel)) + if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb) ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); diff --git a/drivers/net/ethernet/sfc/falcon/tenxpress.c b/drivers/net/ethernet/sfc/falcon/tenxpress.c index acc548a1c4d6..ff9b4e2b590c 100644 --- a/drivers/net/ethernet/sfc/falcon/tenxpress.c +++ b/drivers/net/ethernet/sfc/falcon/tenxpress.c @@ -351,9 +351,6 @@ static int tenxpress_phy_reconfigure(struct ef4_nic *efx) return 0; } -static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd); - /* Poll for link state changes */ static bool tenxpress_phy_poll(struct ef4_nic *efx) { @@ -443,7 +440,8 @@ sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags) } static void -tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +tenxpress_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { u32 adv = 0, lpa = 0; int reg; @@ -455,20 +453,22 @@ tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd 
*ecmd) if (reg & MDIO_AN_10GBT_STAT_LP10G) lpa |= ADVERTISED_10000baseT_Full; - mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); + mdio45_ethtool_ksettings_get_npage(&efx->mdio, cmd, adv, lpa); /* In loopback, the PHY automatically brings up the correct interface, * but doesn't advertise the correct speed. So override it */ if (LOOPBACK_EXTERNAL(efx)) - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; } -static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static int +tenxpress_set_link_ksettings(struct ef4_nic *efx, + const struct ethtool_link_ksettings *cmd) { - if (!ecmd->autoneg) + if (!cmd->base.autoneg) return -EINVAL; - return ef4_mdio_set_settings(efx, ecmd); + return ef4_mdio_set_link_ksettings(efx, cmd); } static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising) @@ -485,8 +485,8 @@ const struct ef4_phy_operations falcon_sfx7101_phy_ops = { .poll = tenxpress_phy_poll, .fini = sfx7101_phy_fini, .remove = tenxpress_phy_remove, - .get_settings = tenxpress_get_settings, - .set_settings = tenxpress_set_settings, + .get_link_ksettings = tenxpress_get_link_ksettings, + .set_link_ksettings = tenxpress_set_link_ksettings, .set_npage_adv = sfx7101_set_npage_adv, .test_alive = ef4_mdio_test_alive, .test_name = sfx7101_test_name, diff --git a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c index 18421f5e880f..3c55fd23c271 100644 --- a/drivers/net/ethernet/sfc/falcon/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/falcon/txc43128_phy.c @@ -540,9 +540,10 @@ static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags) return rc; } -static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd) +static void txc43128_get_link_ksettings(struct ef4_nic *efx, + struct ethtool_link_ksettings *cmd) { - mdio45_ethtool_gset(&efx->mdio, ecmd); + mdio45_ethtool_ksettings_get(&efx->mdio, cmd); } const struct ef4_phy_operations falcon_txc_phy_ops = { @@ -552,8 +553,8 @@ const struct ef4_phy_operations falcon_txc_phy_ops = { .poll = txc43128_phy_poll, .fini = txc43128_phy_fini, .remove = txc43128_phy_remove, - .get_settings = txc43128_get_settings, - .set_settings = ef4_mdio_set_settings, + .get_link_ksettings = txc43128_get_link_ksettings, + .set_link_ksettings = ef4_mdio_set_link_ksettings, .test_alive = ef4_mdio_test_alive, .run_tests = txc43128_run_tests, .test_name = txc43128_test_name, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index e4ca2161af70..ba45150f53c7 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) } } +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + /* Looks at available SRAM resources and works out how many queues we * can support, and where things like descriptor caches should live. 
* diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index d0ed7f71ea7e..8189a1cd973f 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -27,6 +27,7 @@ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. * Used for RX default unicast and multicast/broadcast filters. * * Only some combinations are supported, depending on NIC type: @@ -54,6 +55,7 @@ enum efx_filter_match_flags { EFX_FILTER_MATCH_OUTER_VID = 0x0100, EFX_FILTER_MATCH_IP_PROTO = 0x0200, EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, + EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800, }; /** @@ -98,6 +100,26 @@ enum efx_filter_flags { EFX_FILTER_FLAG_TX = 0x10, }; +/** enum efx_encap_type - types of encapsulation + * @EFX_ENCAP_TYPE_NONE: no encapsulation + * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation + * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation + * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation + * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame + * + * Contains both enumerated types and flags. + * To get just the type, AND with @EFX_ENCAP_TYPES_MASK. + */ +enum efx_encap_type { + EFX_ENCAP_TYPE_NONE = 0, + EFX_ENCAP_TYPE_VXLAN = 1, + EFX_ENCAP_TYPE_NVGRE = 2, + EFX_ENCAP_TYPE_GENEVE = 3, + + EFX_ENCAP_TYPES_MASK = 7, + EFX_ENCAP_FLAG_IPV6 = 8, +}; + /** * struct efx_filter_spec - specification for a hardware filter * @match_flags: Match type flags, from &enum efx_filter_match_flags @@ -118,6 +140,8 @@ enum efx_filter_flags { * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if + * %EFX_FILTER_MATCH_ENCAP_TYPE is set * * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be * used to initialise the structure. The efx_filter_set_*() functions @@ -144,7 +168,8 @@ struct efx_filter_spec { __be32 rem_host[4]; __be16 loc_port; __be16 rem_port; - /* total 64 bytes */ + u32 encap_type:4; + /* total 65 bytes */ }; enum { @@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) return 0; } +static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, + enum efx_encap_type encap_type) +{ + spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->encap_type = encap_type; +} + +static inline enum efx_encap_type efx_filter_get_encap_type( + const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + return spec->encap_type; + return EFX_ENCAP_TYPE_NONE; +} #endif /* EFX_FILTER_H */ diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 995651341b94..24b271b9c260 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -837,11 +837,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, outbuf, outlen, outlen_actual, quiet, NULL, raw_rc); } else { - netif_printk(efx, hw, rc == -EPERM ?
KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x failed after proxy auth rc=%d\n", - cmd, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); if (rc == -EINTR || rc == -EIO) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); @@ -1084,10 +1082,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, code = MCDI_DWORD(outbuf, ERR_CODE); if (outlen >= MC_CMD_ERR_ARG_OFST + 4) err_arg = MCDI_DWORD(outbuf, ERR_ARG); - netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR, - efx->net_dev, - "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", - cmd, inlen, rc, code, err_arg); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -2057,8 +2054,8 @@ fail: /* Older firmware lacks GET_WORKAROUNDS and this isn't especially * terrifying. The call site will have to deal with it though. */ - netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR, - efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, + "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1c62c1a00fca..73810d2d630c 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -208,6 +208,12 @@ struct efx_tx_buffer { * @write_count: Current write pointer * This is the number of buffers that have been added to the * hardware ring. + * @packet_write_count: Completable write pointer + * This is the write pointer of the last packet written. + * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. The xmit path will * only get the up-to-date value of read_count if this @@ -255,6 +261,7 @@ struct efx_tx_queue { /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; + unsigned int packet_write_count; unsigned int old_read_count; unsigned int tso_bursts; unsigned int tso_long_headers; @@ -484,131 +491,6 @@ struct efx_channel { u32 sync_timestamp_minor; }; -#ifdef CONFIG_NET_RX_BUSY_POLL -enum efx_channel_busy_poll_state { - EFX_CHANNEL_STATE_IDLE = 0, - EFX_CHANNEL_STATE_NAPI = BIT(0), - EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1, - EFX_CHANNEL_STATE_NAPI_REQ = BIT(1), - EFX_CHANNEL_STATE_POLL_BIT = 2, - EFX_CHANNEL_STATE_POLL = BIT(2), - EFX_CHANNEL_STATE_DISABLE_BIT = 3, -}; - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from the device poll routine to get ownership of a channel. 
*/ -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - unsigned long prev, old = READ_ONCE(channel->busy_poll_state); - - while (1) { - switch (old) { - case EFX_CHANNEL_STATE_POLL: - /* Ensure efx_channel_try_lock_poll() wont starve us */ - set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT, - &channel->busy_poll_state); - /* fallthrough */ - case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ: - return false; - default: - break; - } - prev = cmpxchg(&channel->busy_poll_state, old, - EFX_CHANNEL_STATE_NAPI); - if (unlikely(prev != old)) { - /* This is likely to mean we've just entered polling - * state. Go back round to set the REQ bit. - */ - old = prev; - continue; - } - return true; - } -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ - /* Make sure write has completed from efx_channel_lock_napi() */ - smp_wmb(); - WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); -} - -/* Called from efx_busy_poll(). */ -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE, - EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ - clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT, - &channel->busy_poll_state); -} - -/* Stop further polling or napi access. - * Returns false if the channel is currently busy polling. - */ -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); - /* Implicit barrier in efx_channel_busy_polling() */ - return !efx_channel_busy_polling(channel); -} - -#else /* CONFIG_NET_RX_BUSY_POLL */ - -static inline void efx_channel_busy_poll_init(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_lock_napi(struct efx_channel *channel) -{ - return true; -} - -static inline void efx_channel_unlock_napi(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_unlock_poll(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_busy_polling(struct efx_channel *channel) -{ - return false; -} - -static inline void efx_channel_enable(struct efx_channel *channel) -{ -} - -static inline bool efx_channel_disable(struct efx_channel *channel) -{ - return true; -} -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /** * struct efx_msi_context - Context for each MSI * @efx: The associated NIC @@ -1174,6 +1056,7 @@ struct efx_mtd_partition { * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1220,6 +1103,7 @@ struct efx_mtd_partition { * @ptp_set_ts_config: Set hardware timestamp configuration. The flags * and tx_type will already have been validated but this operation * must validate and update rx_filter. 
+ * @get_phys_port_id: Get the underlying physical port id. * @set_mac_address: Set the MAC address of the device * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. @@ -1236,6 +1120,7 @@ struct efx_mtd_partition { * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) @@ -1302,7 +1187,8 @@ struct efx_nic_type { unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, - const u32 *rx_indir_table); + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1358,6 +1244,8 @@ struct efx_nic_type { int (*sriov_configure)(struct efx_nic *efx, int num_vfs); int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); @@ -1372,8 +1260,6 @@ struct efx_nic_type { struct ifla_vf_info *ivi); int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, int link_state); - int (*sriov_get_phys_port_id)(struct efx_nic *efx, - struct netdev_phys_item_id *ppid); int (*vswitching_probe)(struct efx_nic *efx); int (*vswitching_restore)(struct efx_nic *efx); void (*vswitching_remove)(struct efx_nic *efx); @@ -1394,12 +1280,14 @@ struct efx_nic_type { unsigned int rx_buffer_padding; bool can_rx_scatter; bool always_rx_scatter; + bool option_descriptors; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; unsigned int max_rx_ip_filters; u32 hwtstamp_filters; + unsigned int rx_hash_key_size; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 223774635cba..85cf131288b7 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + /* Decide whether we can use TX PIO, ie. write packet data directly into * a buffer on the device. 
This can reduce latency at the expense of * throughput, so we only do this if both hardware and software TX rings @@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) { struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); - return tx_queue->piobuf && - __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && - __efx_nic_tx_is_empty(partner, partner->insert_count); + + return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(partner); } /* Decide whether to push a TX descriptor to the NIC vs merely writing @@ -332,6 +343,7 @@ enum { * @pio_write_base: Base address for writing PIO buffers * @pio_write_vi_base: Relative VI number for @pio_write_base * @piobuf_handle: Handle of each PIO buffer allocated + * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot * @rx_rss_context: Firmware handle for our RSS context @@ -369,6 +381,7 @@ struct efx_ef10_nic_data { void __iomem *wc_membase, *pio_write_base; unsigned int pio_write_vi_base; unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + u16 piobuf_size; bool must_restore_piobufs; u32 rx_rss_context; bool rx_rss_context_exclusive; @@ -613,6 +626,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 5f4ad4f3518f..31587f4066ab 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -665,8 +665,7 @@ void __efx_rx_packet(struct efx_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb && - !efx_channel_busy_polling(channel)) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4e54e5dc9fcb..af7cd8565a41 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -332,12 +332,33 @@ fail1: return rc; } +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(efx->rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, - const u32 *rx_indir_table) + const u32 *rx_indir_table, const u8 *key) { efx_oword_t temp; /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rx_hash_key, key, 
sizeof(temp)); memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); @@ -402,7 +423,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx, false, efx->rx_indir_table); + siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); efx->rss_active = true; /* Enable event logging */ @@ -979,6 +1000,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -1044,6 +1066,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, + .option_descriptors = false, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1053,4 +1076,5 @@ const struct efx_nic_type siena_a0_nic_type = { .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, }; diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c index 9abcf4aded30..0b766fdbcddb 100644 --- a/drivers/net/ethernet/sfc/sriov.c +++ b/drivers/net/ethernet/sfc/sriov.c @@ -73,14 +73,3 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } - -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_get_phys_port_id) - return efx->type->sriov_get_phys_port_id(efx, ppid); - else - return -EOPNOTSUPP; -} diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h index ba1762e7f216..84c7984edcaf 100644 --- a/drivers/net/ethernet/sfc/sriov.h +++ b/drivers/net/ethernet/sfc/sriov.h @@ -23,9 +23,6 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi); int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, int link_state); -int efx_sriov_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid); - #endif /* CONFIG_SFC_SRIOV */ #endif /* EFX_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 3c0151424d12..ff88d60aa6d5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -28,7 +28,6 @@ #ifdef EFX_USE_PIO -#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; @@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->insert_count = 0; tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 55a95e1d69d6..5f2737189c72 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -264,7 +264,6 @@ struct epic_private { spinlock_t lock; /* Group with Tx control cache line. 
*/ spinlock_t napi_lock; struct napi_struct napi; - unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; unsigned int cur_rx, dirty_rx; @@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&ep->lock); spin_lock_init(&ep->napi_lock); - ep->reschedule_in_poll = 0; /* Bring the chip out of low-power mode. */ ew32(GENCTL, 0x4200); @@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) handled = 1; - if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + if (status & EpicNapiEvent) { spin_lock(&ep->napi_lock); if (napi_schedule_prep(&ep->napi)) { epic_napi_irq_off(dev, ep); __napi_schedule(&ep->napi); - } else - ep->reschedule_in_poll++; + } spin_unlock(&ep->napi_lock); } status &= ~EpicNapiEvent; @@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget) { struct epic_private *ep = container_of(napi, struct epic_private, napi); struct net_device *dev = ep->mii.dev; - int work_done = 0; void __iomem *ioaddr = ep->ioaddr; - -rx_action: + int work_done; epic_tx(dev, ep); - work_done += epic_rx(dev, budget); + work_done = epic_rx(dev, budget); epic_rx_err(dev, ep); - if (work_done < budget) { + if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; - int more; - - /* A bit baroque but it avoids a (space hungry) spin_unlock */ spin_lock_irqsave(&ep->napi_lock, flags); - more = ep->reschedule_in_poll; - if (!more) { - __napi_complete(napi); - ew32(INTSTAT, EpicNapiEvent); - epic_napi_irq_on(dev, ep); - } else - ep->reschedule_in_poll--; - + ew32(INTSTAT, EpicNapiEvent); + epic_napi_irq_on(dev, ep); spin_unlock_irqrestore(&ep->napi_lock, flags); - - if (more) - goto rx_action; } return work_done; diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 67154621abcf..97280daba27f 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -113,6 +113,7 @@ struct smc_private { struct mii_if_info mii_if; int duplex; int rx_ovrn; + unsigned long last_rx; }; /* Special definitions for Megahertz multifunction cards */ @@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev) if (!(rx_status & RS_ERRORS)) { /* do stuff to make a new packet */ struct sk_buff *skb; + struct smc_private *smc = netdev_priv(dev); /* Note: packet_length adds 5 or 6 extra bytes here! 
*/ skb = netdev_alloc_skb(dev, packet_length+2); @@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - dev->last_rx = jiffies; + smc->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += packet_length; if (rx_status & RS_MULTICAST) @@ -1790,7 +1792,7 @@ static void media_check(u_long arg) } /* Ignore collisions unless we've had no rx's recently */ - if (time_after(jiffies, dev->last_rx + HZ)) { + if (time_after(jiffies, smc->last_rx + HZ)) { if (smc->tx_err || (smc->media_status & EPH_16COL)) media |= EPH_16COL; } diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 3174aebb322f..2fa3c1d03abc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget) smsc9420_pci_flush_write(pd); if (work_done < budget) { - napi_complete(&pd->napi); + napi_complete_done(&pd->napi, work_done); /* re-enable RX DMA interrupts */ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig index 1c1157d2bd40..ecd7a5edef5d 100644 --- a/drivers/net/ethernet/stmicro/Kconfig +++ b/drivers/net/ethernet/stmicro/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO default y depends on HAS_IOMEM ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. + If you have a network (Ethernet) card based on Synopsys Ethernet IP + Cores, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index ab66248a4b78..cfbe3634dfa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,5 +1,5 @@ config STMMAC_ETH - tristate "STMicroelectronics 10/100/1000 Ethernet driver" + tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver" depends on HAS_IOMEM && HAS_DMA select MII select PHYLIB @@ -7,9 +7,8 @@ config STMMAC_ETH imply PTP_1588_CLOCK select RESET_CONTROLLER ---help--- - This is the driver for the Ethernet IPs are built around a - Synopsys IP Core and only tested on the STMicroelectronics - platforms. + This is the driver for the Ethernet IPs built around a + Synopsys IP Core. if STMMAC_ETH @@ -29,6 +28,15 @@ config STMMAC_PLATFORM if STMMAC_PLATFORM +config DWMAC_DWC_QOS_ETH + tristate "Support for snps,dwc-qos-ethernet.txt DT binding." + select PHYLIB + select CRC32 + select MII + depends on OF && HAS_DMA + help + Support for chips using the snps,dwc-qos-ethernet.txt DT binding. + config DWMAC_GENERIC tristate "Generic driver for DWMAC" default STMMAC_PLATFORM @@ -143,11 +151,11 @@ config STMMAC_PCI tristate "STMMAC PCI bus support" depends on STMMAC_ETH && PCI ---help--- - This is to select the Synopsys DWMAC available on PCI devices, - if you have a controller with this interface, say Y or M here. + This selects the PCI bus support for the stmmac driver. + This driver was tested on Xilinx XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit. - This PCI support is tested on XLINX XC2V3000 FF1152AMT0221 - D1215994A VIRTEX FPGA board. + If you have a controller with this interface, say Y or M here. If unsure, say N.
endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 8f83a86ba13c..700c60336674 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o +obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b13a144f72ad..75e2666df940 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -323,6 +323,9 @@ struct dma_features { /* TX and RX number of channels */ unsigned int number_rx_channel; unsigned int number_tx_channel; + /* TX and RX number of queues */ + unsigned int number_rx_queues; + unsigned int number_tx_queues; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; }; @@ -454,6 +457,8 @@ struct stmmac_ops { void (*core_init)(struct mac_device_info *hw, int mtu); /* Enable and verify that the IPC module is supported */ int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); /* Dump MAC registers */ void (*dump_regs)(struct mac_device_info *hw); /* Handle extra events on specific interrupts hw dependent */ @@ -471,7 +476,8 @@ struct stmmac_ops { unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c new file mode 100644 index 000000000000..1a3fa3d9f855 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -0,0 +1,202 @@ +/* + * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver + * + * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) +{ + struct device_node *np = pdev->dev.of_node; + u32 burst_map = 0; + u32 bit_index = 0; + u32 a_index = 0; + + if (!plat_dat->axi) { + plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL); + + if (!plat_dat->axi) + return -ENOMEM; + } + + plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi"); + if (of_property_read_u32(np, "snps,write-requests", + &plat_dat->axi->axi_wr_osr_lmt)) { + /* + * Since the register has a reset value of 1, if the property + * is missing, default to 1. + */ + plat_dat->axi->axi_wr_osr_lmt = 1; + } else { + /* + * If the property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. + */ + plat_dat->axi->axi_wr_osr_lmt--; + } + + if (of_property_read_u32(np, "snps,read-requests", + &plat_dat->axi->axi_rd_osr_lmt)) { + /* + * Since the register has a reset value of 1, if the property + * is missing, default to 1. + */ + plat_dat->axi->axi_rd_osr_lmt = 1; + } else { + /* + * If the property exists, to keep the behavior from dwc_eth_qos, + * subtract one after parsing. + */ + plat_dat->axi->axi_rd_osr_lmt--; + } + of_property_read_u32(np, "snps,burst-map", &burst_map); + + /* converts burst-map bitmask to burst array */ + for (bit_index = 0; bit_index < 7; bit_index++) { + if (burst_map & (1 << bit_index)) { + switch (bit_index) { + case 0: + plat_dat->axi->axi_blen[a_index] = 4; break; + case 1: + plat_dat->axi->axi_blen[a_index] = 8; break; + case 2: + plat_dat->axi->axi_blen[a_index] = 16; break; + case 3: + plat_dat->axi->axi_blen[a_index] = 32; break; + case 4: + plat_dat->axi->axi_blen[a_index] = 64; break; + case 5: + plat_dat->axi->axi_blen[a_index] = 128; break; + case 6: + plat_dat->axi->axi_blen[a_index] = 256; break; + default: + break; + } + a_index++; + } + } + + /* dwc-qos needs GMAC4, AAL, TSO and PMT */ + plat_dat->has_gmac4 = 1; + plat_dat->dma_cfg->aal = 1; + plat_dat->tso_en = 1; + plat_dat->pmt = 1; + + return 0; +} + +static int dwc_eth_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct resource *res; + int ret; + + memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); + + /* + * Since stmmac_platform supports named IRQs only, basic platform + * resource initialization is done in the glue logic.
+ */ + stmmac_res.irq = platform_get_irq(pdev, 0); + if (stmmac_res.irq < 0) { + if (stmmac_res.irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "IRQ configuration information not found\n"); + + return stmmac_res.irq; + } + stmmac_res.wol_irq = stmmac_res.irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(stmmac_res.addr)) + return PTR_ERR(stmmac_res.addr); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(plat_dat->stmmac_clk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + ret = PTR_ERR(plat_dat->stmmac_clk); + plat_dat->stmmac_clk = NULL; + goto err_remove_config_dt; + } + clk_prepare_enable(plat_dat->stmmac_clk); + + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(plat_dat->pclk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + ret = PTR_ERR(plat_dat->pclk); + plat_dat->pclk = NULL; + goto err_out_clk_dis_phy; + } + clk_prepare_enable(plat_dat->pclk); + + ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); + if (ret) + goto err_out_clk_dis_aper; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_out_clk_dis_aper; + + return 0; + +err_out_clk_dis_aper: + clk_disable_unprepare(plat_dat->pclk); +err_out_clk_dis_phy: + clk_disable_unprepare(plat_dat->stmmac_clk); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static int dwc_eth_dwmac_remove(struct platform_device *pdev) +{ + return stmmac_pltfr_remove(pdev); +} + +static const struct of_device_id dwc_eth_dwmac_match[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10", }, + { } +}; +MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); + +static struct platform_driver dwc_eth_dwmac_driver = { + .probe = dwc_eth_dwmac_probe, + .remove = dwc_eth_dwmac_remove, + .driver = { + .name = "dwc-eth-dwmac", + .of_match_table = dwc_eth_dwmac_match, + }, +}; +module_platform_driver(dwc_eth_dwmac_driver); + +MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index ffaed1f35efe..9685555932ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -35,10 +35,6 @@ #define PRG_ETH0_TXDLY_SHIFT 5 #define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) -#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) -#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) /* divider for the result of m250_sel */ #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 @@ -69,6 +65,8 @@ struct meson8b_dwmac { struct clk_divider m25_div; struct clk *m25_div_clk; + + u32 tx_delay_ns; }; static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, @@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; unsigned long clk_rate; + u8 tx_dly_val = 0; switch (dwmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: + /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where + * 8ns are exactly one cycle of the 125MHz RGMII TX clock): + * 0ns = 0x0, 
2ns = 0x1, 4ns = 0x2, 6ns = 0x3 + */ + tx_dly_val = dwmac->tx_delay_ns >> 1; + /* fall through */ + + case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: /* Generate a 25MHz clock for the PHY */ clk_rate = 25 * 1000 * 1000; @@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); - /* TX clock delay - all known boards use a 1/4 cycle delay */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, - PRG_ETH0_TXDLY_QUARTER); + tx_dly_val << PRG_ETH0_TXDLY_SHIFT); break; case PHY_INTERFACE_MODE_RMII: @@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + /* use 2ns as fallback since this value was previously hardcoded */ + if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", + &dwmac->tx_delay_ns)) + dwmac->tx_delay_ns = 2; + ret = meson8b_init_clk(dwmac); if (ret) goto err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 1f997027ae51..17d4bbaeb65c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) * mode. Create a copy of the core reset handle so it can be used by * the driver later. */ - dwmac->stmmac_rst = stpriv->stmmac_rst; + dwmac->stmmac_rst = stpriv->plat->stmmac_rst; ret = socfpga_dwmac_set_phy_mode(dwmac); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 5484fd726d5a..a26715bf4564 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -347,11 +347,14 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, return ret; } -static void dwmac1000_set_eee_mode(struct mac_device_info *hw) +static void dwmac1000_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) { void __iomem *ioaddr = hw->pcsr; u32 value; + /* TODO: handle en_tx_lpi_clockgating */ + /* Enable the link status receive on RGMII, SGMII ore SMII * receive path and instruct the transmit to enter in LPI * state.
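
A note on the meson8b TX-delay conversion above: one cycle of the 125MHz RGMII TX clock is 8ns, and the 2-bit PRG_ETH0 delay field counts quarter cycles, so each field increment is worth 2ns and the "amlogic,tx-delay-ns" value maps to the register field with a single right shift. A minimal standalone sketch of that mapping, compilable outside the kernel (the helper name and the self-checks are illustrative, not part of the driver):

    #include <assert.h>
    #include <stdint.h>

    /* One 125MHz RGMII TX clock cycle is 8ns; the 2-bit PRG_ETH0 delay
     * field selects quarter-cycle steps, i.e. 2ns per step:
     * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3.
     */
    static uint8_t tx_delay_ns_to_field(uint32_t tx_delay_ns)
    {
            /* divide by the 2ns step, then mask to the 2-bit field */
            return (tx_delay_ns >> 1) & 0x3;
    }

    int main(void)
    {
            assert(tx_delay_ns_to_field(0) == 0x0);
            assert(tx_delay_ns_to_field(2) == 0x1);
            assert(tx_delay_ns_to_field(4) == 0x2);
            assert(tx_delay_ns_to_field(6) == 0x3);
            return 0;
    }

In the patch itself the shifted value is applied through meson8b_dwmac_mask_bits() under PRG_ETH0_TXDLY_MASK, so the programmed value is bounded to the same two bits.
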
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 3e8d4fefa5e0..db45134fddf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -22,6 +22,7 @@ #define GMAC_HASH_TAB_32_63 0x00000014 #define GMAC_RX_FLOW_CTRL 0x00000090 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_RXQ_CTRL0 0x000000a0 #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 #define GMAC_PCS_BASE 0x000000e0 @@ -44,6 +45,11 @@ #define GMAC_MAX_PERFECT_ADDRESSES 128 +/* MAC RX Queue Enable */ +#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) +#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) + /* MAC Flow Control RX */ #define GMAC_RX_FLOW_CTRL_RFE BIT(0) @@ -84,6 +90,19 @@ enum power_event { power_down = 0x00000001, }; +/* Energy Efficient Ethernet (EEE) for GMAC4 + * + * LPI status, timer and control register offset + */ +#define GMAC4_LPI_CTRL_STATUS 0xd0 +#define GMAC4_LPI_TIMER_CTRL 0xd4 + +/* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ + /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) #define GMAC_DEBUG_TFCSTS_SHIFT 17 @@ -133,6 +152,8 @@ enum power_event { /* MAC HW features2 bitmap */ #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) +#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index eaed7cb21867..202216cd6789 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -59,6 +59,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) writel(value, ioaddr + GMAC_INT_EN); } +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + + value &= GMAC_RX_QUEUE_CLEAR(queue); + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + + writel(value, ioaddr + GMAC_RXQ_CTRL0); +} + static void dwmac4_dump_regs(struct mac_device_info *hw) { void __iomem *ioaddr = hw->pcsr; @@ -126,6 +137,65 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw, GMAC_ADDR_LOW(reg_n)); } +static void dwmac4_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII or SMII + * receive path and instruct the transmit to enter the LPI + * state.
+ */ + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (en_tx_lpi_clockgating) + value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (link) + value |= GMAC4_LPI_CTRL_STATUS_PLS; + else + value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. + */ + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); +} + static void dwmac4_set_filter(struct mac_device_info *hw, struct net_device *dev) { @@ -392,12 +462,17 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) static const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, .dump_regs = dwmac4_dump_regs, .host_irq_status = dwmac4_irq_status, .flow_ctrl = dwmac4_flow_ctrl, .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 8196ab5fc33c..377d1b44d4f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -303,6 +303,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; dma_cap->number_tx_channel = ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + /* TX and RX number of queues */ + dma_cap->number_rx_queues = + ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; + dma_cap->number_tx_queues = + ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index eab04aeeeb95..bf8a83ef96f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -106,9 +106,6 @@ struct stmmac_priv { u32 msg_enable; int wolopts; int wol_irq; - struct clk *stmmac_clk; - struct clk *pclk; - struct reset_control *stmmac_rst; int clk_csr; struct timer_list eee_ctrl_timer; int lpi_irq; @@ -120,8 +117,6 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; - struct clk *clk_ptp_ref; - unsigned int clk_ptp_rate; u32 adv_ts; int 
use_riwt; int irq_wake; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 699ee1d30426..322e5c6a0d4b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -712,7 +712,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; @@ -722,7 +722,7 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { - unsigned long clk = clk_get_rate(priv->stmmac_clk); + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); if (!clk) return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e3f6389e1b01..bd83bf9ef326 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -158,7 +158,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { u32 clk_rate; - clk_rate = clk_get_rate(priv->stmmac_clk); + clk_rate = clk_get_rate(priv->plat->stmmac_clk); /* Platform provided default clk_csr would be assumed valid * for all other cases except for the below mentioned ones. @@ -239,7 +239,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if ((priv->dirty_tx == priv->cur_tx) && (priv->tx_path_in_lpi_mode == false)) - priv->hw->mac->set_eee_mode(priv->hw); + priv->hw->mac->set_eee_mode(priv->hw, + priv->plat->en_tx_lpi_clockgating); } /** @@ -606,7 +607,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ptpaddr, priv->clk_ptp_rate, + priv->ptpaddr, priv->plat->clk_ptp_rate, priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc); @@ -616,7 +617,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) * where, freq_div_ratio = 1e9ns/sec_inc */ temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->clk_ptp_rate); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend); @@ -644,18 +645,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - /* Fall-back to main clock in case of no PTP ref is passed */ - priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); - if (IS_ERR(priv->clk_ptp_ref)) { - priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); - priv->clk_ptp_ref = NULL; - netdev_dbg(priv->dev, "PTP uses main clock\n"); - } else { - clk_prepare_enable(priv->clk_ptp_ref); - priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); - netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate); - } - priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x core */ if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) @@ -682,8 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) static void stmmac_release_ptp(struct stmmac_priv *priv) { - if (priv->clk_ptp_ref) - clk_disable_unprepare(priv->clk_ptp_ref); + if (priv->plat->clk_ptp_ref) + clk_disable_unprepare(priv->plat->clk_ptp_ref); stmmac_ptp_unregister(priv); } @@ -1271,6 +1260,28 @@ static 
void free_dma_desc_resources(struct stmmac_priv *priv) } /** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + int rx_count = priv->dma_cap.number_rx_queues; + int queue = 0; + + /* If GMAC does not have multiple queues, then this is not necessary */ + if (rx_count == 1) + return; + + /* + * If the core is synthesized with multiple rx queues / multiple + * dma channels, then rx queues will be disabled by default. + * For now only rx queue 0 is enabled. + */ + priv->hw->mac->rx_queue_enable(priv->hw, queue); +} + +/** * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure * Description: it is used for configuring the DMA operation mode register in @@ -1691,6 +1702,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); + /* Initialize MAC RX Queues */ + if (priv->hw->mac->rx_queue_enable) + stmmac_mac_enable_rx_queues(priv); + ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); @@ -1711,8 +1726,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (init_ptp) { ret = stmmac_init_ptp(priv); - if (ret) - netdev_warn(priv->dev, "fail to init PTP.\n"); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); } #ifdef CONFIG_DEBUG_FS @@ -2669,7 +2686,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); stmmac_enable_dma_irq(priv); } return work_done; @@ -3251,44 +3268,8 @@ int stmmac_dvr_probe(struct device *device, if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; - priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_clk)) { - netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n", - __func__); - /* If failed to obtain stmmac_clk and specific clk_csr value - * is NOT passed from the platform, probe fail.
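Aside: the stmmac_poll hunk above (and the niu, sungem and sunvnet hunks below) converts napi_complete() to napi_complete_done(), which reports the amount of work actually done so the core can make better re-polling and GRO flush decisions. A generic sketch of the resulting poll shape, with struct my_priv, my_rx() and my_enable_irq() as illustrative stand-ins:

#include <linux/netdevice.h>

struct my_priv { struct napi_struct napi; };
static int my_rx(struct my_priv *priv, int budget);
static void my_enable_irq(struct my_priv *priv);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_rx(priv, budget);	/* handle up to budget packets */

	/* Only complete and re-arm interrupts when under budget; passing
	 * work_done (rather than nothing) is the whole point of the change.
	 */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		my_enable_irq(priv);
	}
	return work_done;
}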
- */ - if (!priv->plat->clk_csr) { - ret = PTR_ERR(priv->stmmac_clk); - goto error_clk_get; - } else { - priv->stmmac_clk = NULL; - } - } - clk_prepare_enable(priv->stmmac_clk); - - priv->pclk = devm_clk_get(priv->device, "pclk"); - if (IS_ERR(priv->pclk)) { - if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_pclk_get; - } - priv->pclk = NULL; - } - clk_prepare_enable(priv->pclk); - - priv->stmmac_rst = devm_reset_control_get(priv->device, - STMMAC_RESOURCE_NAME); - if (IS_ERR(priv->stmmac_rst)) { - if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto error_hw_init; - } - dev_info(priv->device, "no reset control found\n"); - priv->stmmac_rst = NULL; - } - if (priv->stmmac_rst) - reset_control_deassert(priv->stmmac_rst); + if (priv->plat->stmmac_rst) + reset_control_deassert(priv->plat->stmmac_rst); /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); @@ -3391,10 +3372,6 @@ error_netdev_register: error_mdio_register: netif_napi_del(&priv->napi); error_hw_init: - clk_disable_unprepare(priv->pclk); -error_pclk_get: - clk_disable_unprepare(priv->stmmac_clk); -error_clk_get: free_netdev(ndev); return ret; @@ -3420,10 +3397,10 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); - if (priv->stmmac_rst) - reset_control_assert(priv->stmmac_rst); - clk_disable_unprepare(priv->pclk); - clk_disable_unprepare(priv->stmmac_clk); + if (priv->plat->stmmac_rst) + reset_control_assert(priv->plat->stmmac_rst); + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) @@ -3472,8 +3449,8 @@ int stmmac_suspend(struct device *dev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable(priv->pclk); - clk_disable(priv->stmmac_clk); + clk_disable(priv->plat->pclk); + clk_disable(priv->plat->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); @@ -3513,8 +3490,8 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk prevously disabled */ - clk_enable(priv->stmmac_clk); - clk_enable(priv->pclk); + clk_enable(priv->plat->stmmac_clk); + clk_enable(priv->plat->pclk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 36942f5a6a53..4963ccdb31e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -121,7 +121,6 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); - axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); @@ -181,10 +180,19 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, mdio = false; } - /* If snps,dwmac-mdio is passed from DT, always register the MDIO */ - for_each_child_of_node(np, plat->mdio_node) { - if (of_device_is_compatible(plat->mdio_node, 
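Aside: the clock and reset acquisition deleted above is not lost; it reappears in stmmac_platform.c later in this patch, so the resources now live in plat_stmmacenet_data. A minimal sketch of the optional-clock idiom being moved, not part of the patch (my_get_optional_clk is an illustrative name):

#include <linux/clk.h>
#include <linux/err.h>

/* A missing clock is tolerated, but probe deferral must still propagate. */
static struct clk *my_get_optional_clk(struct device *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return clk;	/* caller must propagate the error */
		clk = NULL;		/* absent clock: use platform defaults */
	}
	return clk;			/* clk_prepare_enable(NULL) is a no-op */
}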
"snps,dwmac-mdio")) - break; + /* exception for dwmac-dwc-qos-eth glue logic */ + if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + } else { + /** + * If snps,dwmac-mdio is passed from DT, always register + * the MDIO + */ + for_each_child_of_node(np, plat->mdio_node) { + if (of_device_is_compatible(plat->mdio_node, + "snps,dwmac-mdio")) + break; + } } if (plat->mdio_node) { @@ -249,6 +257,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->en_tx_lpi_clockgating = + of_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. */ @@ -333,7 +344,54 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->axi = stmmac_axi_setup(pdev); + /* clock setup */ + plat->stmmac_clk = devm_clk_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Cannot get CSR clock\n"); + plat->stmmac_clk = NULL; + } + clk_prepare_enable(plat->stmmac_clk); + + plat->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(plat->pclk)) { + if (PTR_ERR(plat->pclk) == -EPROBE_DEFER) + goto error_pclk_get; + + plat->pclk = NULL; + } + clk_prepare_enable(plat->pclk); + + /* Fall-back to main clock in case of no PTP ref is passed */ + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + dev_warn(&pdev->dev, "PTP uses main clock\n"); + } else { + clk_prepare_enable(plat->clk_ptp_ref); + plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); + dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); + } + + plat->stmmac_rst = devm_reset_control_get(&pdev->dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_rst)) { + if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER) + goto error_hw_init; + + dev_info(&pdev->dev, "no reset control found\n"); + plat->stmmac_rst = NULL; + } + return plat; + +error_hw_init: + clk_disable_unprepare(plat->pclk); +error_pclk_get: + clk_disable_unprepare(plat->stmmac_clk); + + return ERR_PTR(-EPROBE_DEFER); } /** diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f90d1af6d390..57978056b336 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget) work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); niu_ldg_rearm(np, lp, 1); } return work_done; @@ -6294,8 +6294,8 @@ no_rings: stats->tx_errors = errors; } -static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void niu_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct niu *np = netdev_priv(dev); @@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, niu_get_rx_stats(np, stats); niu_get_tx_stats(np, stats); } - - return stats; } static void niu_load_hash_xmac(struct niu *np, u16 *hash) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index d277e4107976..5c5952e782cd 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int 
budget) gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - napi_complete(napi); + napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8878b75d68b4..191c8ade6155 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -860,7 +860,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget) int processed = vnet_event_napi(port, budget); if (processed < budget) { - napi_complete(napi); + napi_complete_done(napi, processed); port->rx_event &= ~LDC_EVENT_DATA_READY; vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); } diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig deleted file mode 100644 index 8276ee5a7d54..000000000000 --- a/drivers/net/ethernet/synopsys/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -# -# Synopsys network device configuration -# - -config NET_VENDOR_SYNOPSYS - bool "Synopsys devices" - default y - ---help--- - If you have a network (Ethernet) device belonging to this class, say Y. - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Synopsys devices. If you say Y, you will be asked - for your specific device in the following questions. - -if NET_VENDOR_SYNOPSYS - -config SYNOPSYS_DWC_ETH_QOS - tristate "Sypnopsys DWC Ethernet QOS v4.10a support" - select PHYLIB - select CRC32 - select MII - depends on OF && HAS_DMA - ---help--- - This driver supports the DWC Ethernet QoS from Synopsys - -endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile deleted file mode 100644 index 7a375723fc18..000000000000 --- a/drivers/net/ethernet/synopsys/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Synopsys network device drivers. -# - -obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c deleted file mode 100644 index 09f5a67da35e..000000000000 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ /dev/null @@ -1,2998 +0,0 @@ -/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver - * - * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). - * This version introduced a lot of changes which breaks backwards - * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers). - * Some fields differ between version 4.00a and 4.10a, mainly the interrupt - * bit fields. The driver could be made compatible with 4.00, if all relevant - * HW erratas are handled. - * - * The GMAC is highly configurable at synthesis time. This driver has been - * developed for a subset of the total available feature set. Currently - * it supports: - * - TSO - * - Checksum offload for RX and TX. - * - Energy efficient ethernet. - * - GMII phy interface. - * - The statistics module. - * - Single RX and TX queue. - * - * Copyright (C) 2015 Axis Communications AB. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- */ - -#include <linux/clk.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/ethtool.h> -#include <linux/stat.h> -#include <linux/types.h> - -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> - -#include <linux/phy.h> -#include <linux/mii.h> -#include <linux/dma-mapping.h> -#include <linux/vmalloc.h> - -#include <linux/device.h> -#include <linux/bitrev.h> -#include <linux/crc32.h> - -#include <linux/of.h> -#include <linux/interrupt.h> -#include <linux/clocksource.h> -#include <linux/net_tstamp.h> -#include <linux/pm_runtime.h> -#include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_mdio.h> -#include <linux/timer.h> -#include <linux/tcp.h> - -#define DRIVER_NAME "dwceqos" -#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" -#define DRIVER_VERSION "0.9" - -#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) - -#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ - -#define DWCEQOS_LPI_TIMER_MIN 8 -#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) - -#define DWCEQOS_RX_BUF_SIZE 2048 - -#define DWCEQOS_RX_DCNT 256 -#define DWCEQOS_TX_DCNT 256 - -#define DWCEQOS_HASH_TABLE_SIZE 64 - -/* The size field in the DMA descriptor is 14 bits */ -#define BYTES_PER_DMA_DESC 16376 - -/* Hardware registers */ -#define START_MAC_REG_OFFSET 0x0000 -#define MAX_MAC_REG_OFFSET 0x0bd0 -#define START_MTL_REG_OFFSET 0x0c00 -#define MAX_MTL_REG_OFFSET 0x0d7c -#define START_DMA_REG_OFFSET 0x1000 -#define MAX_DMA_REG_OFFSET 0x117C - -#define REG_SPACE_SIZE 0x1800 - -/* DMA */ -#define REG_DWCEQOS_DMA_MODE 0x1000 -#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 -#define REG_DWCEQOS_DMA_IS 0x1008 -#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c - -/* DMA channel registers */ -#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 -#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 -#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 -#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c -#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 -#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 -#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c -#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 -#define REG_DWCEQOS_DMA_CH0_IE 0x1134 -#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 -#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c -#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 -#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c -#define REG_DWCEQOS_DMA_CH0_STA 0x1160 - -#define DWCEQOS_DMA_MODE_TXPR BIT(11) -#define DWCEQOS_DMA_MODE_DA BIT(1) - -#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) -#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) -#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) - -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ - (((x) << 16) & 0x000F0000) -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) - -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ - (((x) << 24) & 0x0F000000) -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 -#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) - -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ - (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) -#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) - -#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) -#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 
18) - -#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) -#define DWCEQOS_DMA_CH_CTRL_START BIT(0) -#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) -#define DWCEQOS_DMA_CH_TX_OSP BIT(4) -#define DWCEQOS_DMA_CH_TX_TSE BIT(12) - -#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) -#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) -#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) -#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) -#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) -#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) - -#define DWCEQOS_DMA_IS_DC0IS BIT(0) -#define DWCEQOS_DMA_IS_MTLIS BIT(16) -#define DWCEQOS_DMA_IS_MACIS BIT(17) - -#define DWCEQOS_DMA_CH0_IS_TI BIT(0) -#define DWCEQOS_DMA_CH0_IS_RI BIT(6) -#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) -#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) -#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) -#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) - -#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) -#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) - -#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) -#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) - -/* DMA descriptor bits for RX normal descriptor (read format) */ -#define DWCEQOS_DMA_RDES3_OWN BIT(31) -#define DWCEQOS_DMA_RDES3_INTE BIT(30) -#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) -#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) - -/* DMA descriptor bits for RX normal descriptor (write back format) */ -#define DWCEQOS_DMA_RDES1_IPCE BIT(7) -#define DWCEQOS_DMA_RDES3_ES BIT(15) -#define DWCEQOS_DMA_RDES3_E_JT BIT(14) -#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) -#define DWCEQOS_DMA_RDES1_PT 0x00000007 -#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) -#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) -#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 - -/* DMA descriptor bits for TX normal descriptor (read format) */ -#define DWCEQOS_DMA_TDES2_IOC BIT(31) -#define DWCEQOS_DMA_TDES3_OWN BIT(31) -#define DWCEQOS_DMA_TDES3_CTXT BIT(30) -#define DWCEQOS_DMA_TDES3_FD BIT(29) -#define DWCEQOS_DMA_TDES3_LD BIT(28) -#define DWCEQOS_DMA_TDES3_CIPH BIT(16) -#define DWCEQOS_DMA_TDES3_CIPP BIT(17) -#define DWCEQOS_DMA_TDES3_CA 0x00030000 -#define DWCEQOS_DMA_TDES3_TSE BIT(18) -#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) -#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) - -#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) - -/* DMA channel states */ -#define DMA_TX_CH_STOPPED 0 -#define DMA_TX_CH_SUSPENDED 6 - -#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) - -/* MTL */ -#define REG_DWCEQOS_MTL_OPER 0x0c00 -#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c -#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 -#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 - -#define REG_DWCEQOS_MTL_IS 0x0c20 -#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 -#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 -#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 -#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c - -#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c - -#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 - -#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) -#define DWCEQOS_MTL_TXQ_TSF BIT(1) -#define DWCEQOS_MTL_TXQ_FTQ BIT(0) -#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 - -#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) - -#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) -#define DWCEQOS_MTL_RXQ_EHFC BIT(7) -#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) -#define DWCEQOS_MTL_RXQ_FEP BIT(4) -#define DWCEQOS_MTL_RXQ_FUP BIT(3) -#define DWCEQOS_MTL_RXQ_RSF BIT(5) -#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) - -/* MAC */ -#define REG_DWCEQOS_MAC_CFG 0x0000 -#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 
-#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 -#define REG_DWCEQOS_MAC_WD_TO 0x000c -#define REG_DWCEQOS_HASTABLE_LO 0x0010 -#define REG_DWCEQOS_HASTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_IS 0x00b0 -#define REG_DWCEQOS_MAC_IE 0x00b4 -#define REG_DWCEQOS_MAC_STAT 0x00b8 -#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 -#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 -#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 -#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 -#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 -#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c -#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 -#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 -#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 -#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 -#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 -#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 -#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 -#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc -#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 -#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 - -#define DWCEQOS_MAC_CFG_ACS BIT(20) -#define DWCEQOS_MAC_CFG_JD BIT(17) -#define DWCEQOS_MAC_CFG_JE BIT(16) -#define DWCEQOS_MAC_CFG_PS BIT(15) -#define DWCEQOS_MAC_CFG_FES BIT(14) -#define DWCEQOS_MAC_CFG_DM BIT(13) -#define DWCEQOS_MAC_CFG_DO BIT(10) -#define DWCEQOS_MAC_CFG_TE BIT(1) -#define DWCEQOS_MAC_CFG_IPC BIT(27) -#define DWCEQOS_MAC_CFG_RE BIT(0) - -#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) -#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) - -#define DWCEQOS_MAC_IS_LPI_INT BIT(5) -#define DWCEQOS_MAC_IS_MMC_INT BIT(8) - -#define DWCEQOS_MAC_RXQ_EN BIT(1) -#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) -#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) -#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) -#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) -#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) -#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) -#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) -#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) -#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) -#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) -#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) - -#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) -#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 -#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 -#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 -#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 -#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 -#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 -#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c -#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) -#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) - -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) -#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) - -#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0)) - -#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \ - DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN) - -#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) - -#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1) -#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16) -#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4) - -/* Features */ -#define 
DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16) -#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14) -#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2) -#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13) -#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1) -#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0) - -#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18) -#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6) -#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f)) - -#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \ - (1 + (((feature1) & 0x1fc0000) >> 18)) - -#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21) -#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16) - -#define DWCEQOS_DMA_MODE_SWR BIT(0) - -#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048 - -/* Mac Management Counters */ -#define REG_DWCEQOS_MMC_CTRL 0x0700 -#define REG_DWCEQOS_MMC_RXIRQ 0x0704 -#define REG_DWCEQOS_MMC_TXIRQ 0x0708 -#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c -#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710 - -#define DWCEQOS_MMC_CTRL_CNTRST BIT(0) -#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2) - -#define DWC_MMC_TXLPITRANSCNTR 0x07F0 -#define DWC_MMC_TXLPIUSCNTR 0x07EC -#define DWC_MMC_TXOVERSIZE_G 0x0778 -#define DWC_MMC_TXVLANPACKETS_G 0x0774 -#define DWC_MMC_TXPAUSEPACKETS 0x0770 -#define DWC_MMC_TXEXCESSDEF 0x076C -#define DWC_MMC_TXPACKETCOUNT_G 0x0768 -#define DWC_MMC_TXOCTETCOUNT_G 0x0764 -#define DWC_MMC_TXCARRIERERROR 0x0760 -#define DWC_MMC_TXEXCESSCOL 0x075C -#define DWC_MMC_TXLATECOL 0x0758 -#define DWC_MMC_TXDEFERRED 0x0754 -#define DWC_MMC_TXMULTICOL_G 0x0750 -#define DWC_MMC_TXSINGLECOL_G 0x074C -#define DWC_MMC_TXUNDERFLOWERROR 0x0748 -#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744 -#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740 -#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C -#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738 -#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734 -#define DWC_MMC_TX256TO511OCTETS_GB 0x0730 -#define DWC_MMC_TX128TO255OCTETS_GB 0x072C -#define DWC_MMC_TX65TO127OCTETS_GB 0x0728 -#define DWC_MMC_TX64OCTETS_GB 0x0724 -#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720 -#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C -#define DWC_MMC_TXPACKETCOUNT_GB 0x0718 -#define DWC_MMC_TXOCTETCOUNT_GB 0x0714 - -#define DWC_MMC_RXLPITRANSCNTR 0x07F8 -#define DWC_MMC_RXLPIUSCNTR 0x07F4 -#define DWC_MMC_RXCTRLPACKETS_G 0x07E4 -#define DWC_MMC_RXRCVERROR 0x07E0 -#define DWC_MMC_RXWATCHDOG 0x07DC -#define DWC_MMC_RXVLANPACKETS_GB 0x07D8 -#define DWC_MMC_RXFIFOOVERFLOW 0x07D4 -#define DWC_MMC_RXPAUSEPACKETS 0x07D0 -#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC -#define DWC_MMC_RXLENGTHERROR 0x07C8 -#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4 -#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0 -#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC -#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8 -#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4 -#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0 -#define DWC_MMC_RX64OCTETS_GB 0x07AC -#define DWC_MMC_RXOVERSIZE_G 0x07A8 -#define DWC_MMC_RXUNDERSIZE_G 0x07A4 -#define DWC_MMC_RXJABBERERROR 0x07A0 -#define DWC_MMC_RXRUNTERROR 0x079C -#define DWC_MMC_RXALIGNMENTERROR 0x0798 -#define DWC_MMC_RXCRCERROR 0x0794 -#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790 -#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C -#define DWC_MMC_RXOCTETCOUNT_G 0x0788 -#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 -#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 - -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); - -/* DMA ring descriptor. 
These are used as support descriptors for the HW DMA */ -struct ring_desc { - struct sk_buff *skb; - dma_addr_t mapping; - size_t len; -}; - -/* DMA hardware descriptor */ -struct dwceqos_dma_desc { - u32 des0; - u32 des1; - u32 des2; - u32 des3; -} ____cacheline_aligned; - -struct dwceqos_mmc_counters { - __u64 txlpitranscntr; - __u64 txpiuscntr; - __u64 txoversize_g; - __u64 txvlanpackets_g; - __u64 txpausepackets; - __u64 txexcessdef; - __u64 txpacketcount_g; - __u64 txoctetcount_g; - __u64 txcarriererror; - __u64 txexcesscol; - __u64 txlatecol; - __u64 txdeferred; - __u64 txmulticol_g; - __u64 txsinglecol_g; - __u64 txunderflowerror; - __u64 txbroadcastpackets_gb; - __u64 txmulticastpackets_gb; - __u64 txunicastpackets_gb; - __u64 tx1024tomaxoctets_gb; - __u64 tx512to1023octets_gb; - __u64 tx256to511octets_gb; - __u64 tx128to255octets_gb; - __u64 tx65to127octets_gb; - __u64 tx64octets_gb; - __u64 txmulticastpackets_g; - __u64 txbroadcastpackets_g; - __u64 txpacketcount_gb; - __u64 txoctetcount_gb; - - __u64 rxlpitranscntr; - __u64 rxlpiuscntr; - __u64 rxctrlpackets_g; - __u64 rxrcverror; - __u64 rxwatchdog; - __u64 rxvlanpackets_gb; - __u64 rxfifooverflow; - __u64 rxpausepackets; - __u64 rxoutofrangetype; - __u64 rxlengtherror; - __u64 rxunicastpackets_g; - __u64 rx1024tomaxoctets_gb; - __u64 rx512to1023octets_gb; - __u64 rx256to511octets_gb; - __u64 rx128to255octets_gb; - __u64 rx65to127octets_gb; - __u64 rx64octets_gb; - __u64 rxoversize_g; - __u64 rxundersize_g; - __u64 rxjabbererror; - __u64 rxrunterror; - __u64 rxalignmenterror; - __u64 rxcrcerror; - __u64 rxmulticastpackets_g; - __u64 rxbroadcastpackets_g; - __u64 rxoctetcount_g; - __u64 rxoctetcount_gb; - __u64 rxpacketcount_gb; -}; - -/* Ethtool statistics */ - -struct dwceqos_stat { - const char stat_name[ETH_GSTRING_LEN]; - int offset; -}; - -#define STAT_ITEM(name, var) \ - {\ - name,\ - offsetof(struct dwceqos_mmc_counters, var),\ - } - -static const struct dwceqos_stat dwceqos_ethtool_stats[] = { - STAT_ITEM("tx_bytes", txoctetcount_gb), - STAT_ITEM("tx_packets", txpacketcount_gb), - STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), - STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), - STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), - STAT_ITEM("tx_pause_packets", txpausepackets), - STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), - STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), - STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), - STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), - STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), - STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), - STAT_ITEM("tx_underflow_errors", txunderflowerror), - STAT_ITEM("tx_lpi_count", txlpitranscntr), - - STAT_ITEM("rx_bytes", rxoctetcount_gb), - STAT_ITEM("rx_packets", rxpacketcount_gb), - STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), - STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), - STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), - STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), - STAT_ITEM("rx_pause_packets", rxpausepackets), - STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), - STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), - STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), - STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), - STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), - STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), - 
STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), - STAT_ITEM("rx_oversize_packets", rxoversize_g), - STAT_ITEM("rx_undersize_packets", rxundersize_g), - STAT_ITEM("rx_jabbers", rxjabbererror), - STAT_ITEM("rx_align_errors", rxalignmenterror), - STAT_ITEM("rx_crc_errors", rxcrcerror), - STAT_ITEM("rx_lpi_count", rxlpitranscntr), -}; - -/* Configuration of AXI bus parameters. - * These values depend on the parameters set on the MAC core as well - * as the AXI interconnect. - */ -struct dwceqos_bus_cfg { - /* Enable AXI low-power interface. */ - bool en_lpi; - /* Limit on number of outstanding AXI write requests. */ - u32 write_requests; - /* Limit on number of outstanding AXI read requests. */ - u32 read_requests; - /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ - u32 burst_map; - /* DMA Programmable burst length*/ - u32 tx_pbl; - u32 rx_pbl; -}; - -struct dwceqos_flowcontrol { - int autoneg; - int rx; - int rx_current; - int tx; - int tx_current; -}; - -struct net_local { - void __iomem *baseaddr; - struct clk *phy_ref_clk; - struct clk *apb_pclk; - - struct device_node *phy_node; - struct net_device *ndev; - struct platform_device *pdev; - - u32 msg_enable; - - struct tasklet_struct tx_bdreclaim_tasklet; - struct workqueue_struct *txtimeout_handler_wq; - struct work_struct txtimeout_reinit; - - phy_interface_t phy_interface; - struct mii_bus *mii_bus; - - unsigned int link; - unsigned int speed; - unsigned int duplex; - - struct napi_struct napi; - - /* DMA Descriptor Areas */ - struct ring_desc *rx_skb; - struct ring_desc *tx_skb; - - struct dwceqos_dma_desc *tx_descs; - struct dwceqos_dma_desc *rx_descs; - - /* DMA Mapped Descriptor areas*/ - dma_addr_t tx_descs_addr; - dma_addr_t rx_descs_addr; - dma_addr_t tx_descs_tail_addr; - dma_addr_t rx_descs_tail_addr; - - size_t tx_free; - size_t tx_next; - size_t rx_cur; - size_t tx_cur; - - /* Spinlocks for accessing DMA Descriptors */ - spinlock_t tx_lock; - - /* Spinlock for register read-modify-writes. */ - spinlock_t hw_lock; - - u32 feature0; - u32 feature1; - u32 feature2; - - struct dwceqos_bus_cfg bus_cfg; - bool en_tx_lpi_clockgating; - - int eee_enabled; - int eee_active; - int csr_val; - u32 gso_size; - - struct dwceqos_mmc_counters mmc_counters; - /* Protect the mmc_counter updates. */ - spinlock_t stats_lock; - u32 mmc_rx_counters_mask; - u32 mmc_tx_counters_mask; - - struct dwceqos_flowcontrol flowcontrol; - - /* Tracks the intermediate state of phy started but hardware - * init not finished yet. 
- */ - bool phy_defer; -}; - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask); - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n); -static int dwceqos_stop(struct net_device *ndev); -static int dwceqos_open(struct net_device *ndev); -static void dwceqos_tx_poll_demand(struct net_local *lp); - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); - -static void dwceqos_reset_state(struct net_local *lp); - -#define dwceqos_read(lp, reg) \ - readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) -#define dwceqos_write(lp, reg, val) \ - writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) - -static void dwceqos_reset_state(struct net_local *lp) -{ - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.rx_current = 0; - lp->flowcontrol.tx_current = 0; - lp->eee_active = 0; - lp->eee_enabled = 0; -} - -static void print_descriptor(struct net_local *lp, int index, int tx) -{ - struct dwceqos_dma_desc *dd; - - if (tx) - dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; - else - dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; - - pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", - index, dd); - pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, - dd->des3); -} - -static void print_status(struct net_local *lp) -{ - size_t desci, i; - - pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, - lp->tx_cur, lp->tx_next); - - print_descriptor(lp, lp->rx_cur, 0); - - for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; - i < DWCEQOS_TX_DCNT; - ++i) { - print_descriptor(lp, desci, 1); - desci = (desci + 1) % DWCEQOS_TX_DCNT; - } - - pr_info("DMA_Debug_Status0: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); - pr_info("DMA_CH0_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); - pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", - dwceqos_read(lp, 0x1144)); - pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", - dwceqos_read(lp, 0x1154)); - pr_info("MTL_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); - pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); - pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); - pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), - dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); -} - -static void dwceqos_mdio_set_csr(struct net_local *lp) -{ - int rate = clk_get_rate(lp->apb_pclk); - - if (rate <= 20000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; - else if (rate <= 35000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; - else if (rate <= 60000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; - else if (rate <= 100000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; - else if (rate <= 150000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; - else if (rate <= 250000000) - lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; -} - -/* Simple MDIO functions implementing mii_bus */ -static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - int data; - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_READ; - dwceqos_write(lp, 
REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - - data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); - if (i == 5) { - netdev_warn(lp->ndev, "MDIO read timed out\n"); - data = 0xffff; - } - - return data & 0xffff; -} - -static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, - u16 value) -{ - struct net_local *lp = bus->priv; - u32 regval; - int i; - - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); - - regval = DWCEQOS_MDIO_PHYADDR(mii_id) | - DWCEQOS_MDIO_PHYREG(phyreg) | - DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | - DWCEQOS_MAC_MDIO_ADDR_GB | - DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; - dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); - - for (i = 0; i < 5; ++i) { - usleep_range(64, 128); - if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & - DWCEQOS_MAC_MDIO_ADDR_GB)) - break; - } - if (i == 5) - netdev_warn(lp->ndev, "MDIO write timed out\n"); - return 0; -} - -static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return phy_mii_ioctl(phydev, rq, cmd); - default: - dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); - return -EOPNOTSUPP; - } -} - -static void dwceqos_link_down(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - /* Indicate link down to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_link_up(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - unsigned long flags; - - /* Indicate link up to the LPI state machine */ - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - - lp->eee_active = !phy_init_eee(ndev->phydev, 0); - - /* Check for changed EEE capability */ - if (!lp->eee_active && lp->eee_enabled) { - lp->eee_enabled = 0; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } -} - -static void dwceqos_set_speed(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - struct phy_device *phydev = ndev->phydev; - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | - DWCEQOS_MAC_CFG_DM); - - if (phydev->duplex) - regval |= DWCEQOS_MAC_CFG_DM; - if (phydev->speed == SPEED_10) { - regval |= DWCEQOS_MAC_CFG_PS; - } else if (phydev->speed == SPEED_100) { - regval |= DWCEQOS_MAC_CFG_PS | - DWCEQOS_MAC_CFG_FES; - } else if (phydev->speed != SPEED_1000) { - netdev_err(lp->ndev, - "unknown PHY speed %d\n", - phydev->speed); - return; - } - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); -} - -static void dwceqos_adjust_link(struct net_device *ndev) -{ - 
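Aside: the MDIO helpers above bound their busy-wait at five fixed iterations of usleep_range(). On kernels of this vintage the same wait is commonly written with readl_poll_timeout(); a sketch under that assumption (my_mdio_wait_idle and the bare BIT(0) busy flag stand in for the driver's GB bit):

#include <linux/bitops.h>
#include <linux/iopoll.h>

static int my_mdio_wait_idle(void __iomem *mdio_addr_reg)
{
	u32 val;

	/* Poll roughly every 64us until the busy bit clears, 640us at most;
	 * returns 0 on success or -ETIMEDOUT, unlike the open-coded loop
	 * above which falls through and reads stale data on timeout.
	 */
	return readl_poll_timeout(mdio_addr_reg, val, !(val & BIT(0)),
				  64, 640);
}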
struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; - - if (lp->phy_defer) - return; - - if (phydev->link) { - if ((lp->speed != phydev->speed) || - (lp->duplex != phydev->duplex)) { - dwceqos_set_speed(lp); - - lp->speed = phydev->speed; - lp->duplex = phydev->duplex; - status_change = 1; - } - - if (lp->flowcontrol.autoneg) { - lp->flowcontrol.rx = phydev->pause || - phydev->asym_pause; - lp->flowcontrol.tx = phydev->pause || - phydev->asym_pause; - } - - if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set rx flow to %d\n", - lp->flowcontrol.rx); - dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); - lp->flowcontrol.rx_current = lp->flowcontrol.rx; - } - if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { - if (netif_msg_link(lp)) - netdev_dbg(ndev, "set tx flow to %d\n", - lp->flowcontrol.tx); - dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); - lp->flowcontrol.tx_current = lp->flowcontrol.tx; - } - } - - if (phydev->link != lp->link) { - lp->link = phydev->link; - status_change = 1; - } - - if (status_change) { - if (phydev->link) { - netif_trans_update(lp->ndev); - dwceqos_link_up(lp); - } else { - dwceqos_link_down(lp); - } - phy_print_status(phydev); - } -} - -static int dwceqos_mii_probe(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = NULL; - - if (lp->phy_node) { - phydev = of_phy_connect(lp->ndev, - lp->phy_node, - &dwceqos_adjust_link, - 0, - lp->phy_interface); - - if (!phydev) { - netdev_err(ndev, "no PHY found\n"); - return -1; - } - } else { - netdev_err(ndev, "no PHY configured\n"); - return -ENODEV; - } - - if (netif_msg_probe(lp)) - phy_attached_info(phydev); - - phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - - lp->link = 0; - lp->speed = 0; - lp->duplex = DUPLEX_UNKNOWN; - lp->flowcontrol.autoneg = AUTONEG_ENABLE; - - return 0; -} - -static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) -{ - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); - goto err_out; - } - - new_skb_baddr = dma_map_single(lp->ndev->dev.parent, - new_skb->data, DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - new_skb = NULL; - goto err_out; - } - - lp->rx_descs[index].des0 = new_skb_baddr; - lp->rx_descs[index].des1 = 0; - lp->rx_descs[index].des2 = 0; - lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_BUF1V | - DWCEQOS_DMA_RDES3_OWN; - - lp->rx_skb[index].mapping = new_skb_baddr; - lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; - -err_out: - lp->rx_skb[index].skb = new_skb; -} - -static void dwceqos_clean_rings(struct net_local *lp) -{ - int i; - - if (lp->rx_skb) { - for (i = 0; i < DWCEQOS_RX_DCNT; i++) { - if (lp->rx_skb[i].skb) { - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[i].mapping, - lp->rx_skb[i].len, - DMA_FROM_DEVICE); - - dev_kfree_skb(lp->rx_skb[i].skb); - lp->rx_skb[i].skb = NULL; - lp->rx_skb[i].mapping = 0; - } - } - } - - if (lp->tx_skb) { - for (i = 0; i < DWCEQOS_TX_DCNT; i++) { - if (lp->tx_skb[i].skb) { - dev_kfree_skb(lp->tx_skb[i].skb); - lp->tx_skb[i].skb = NULL; - } - if (lp->tx_skb[i].mapping) { - dma_unmap_single(lp->ndev->dev.parent, - 
lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - lp->tx_skb[i].mapping = 0; - } - } - } -} - -static void dwceqos_descriptor_free(struct net_local *lp) -{ - int size; - - dwceqos_clean_rings(lp); - - kfree(lp->tx_skb); - lp->tx_skb = NULL; - kfree(lp->rx_skb); - lp->rx_skb = NULL; - - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->rx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->rx_descs), lp->rx_descs_addr); - lp->rx_descs = NULL; - } - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - if (lp->tx_descs) { - dma_free_coherent(lp->ndev->dev.parent, size, - (void *)(lp->tx_descs), lp->tx_descs_addr); - lp->tx_descs = NULL; - } -} - -static int dwceqos_descriptor_init(struct net_local *lp) -{ - int size; - u32 i; - - lp->gso_size = 0; - - lp->tx_skb = NULL; - lp->rx_skb = NULL; - lp->rx_descs = NULL; - lp->tx_descs = NULL; - - /* Reset the DMA indexes */ - lp->rx_cur = 0; - lp->tx_cur = 0; - lp->tx_next = 0; - lp->tx_free = DWCEQOS_TX_DCNT; - - /* Allocate Ring descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); - lp->rx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->rx_skb) - goto err_out; - - size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); - lp->tx_skb = kzalloc(size, GFP_KERNEL); - if (!lp->tx_skb) - goto err_out; - - /* Allocate DMA descriptors */ - size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, GFP_KERNEL); - if (!lp->rx_descs) - goto err_out; - lp->rx_descs_tail_addr = lp->rx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; - - size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); - lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, GFP_KERNEL); - if (!lp->tx_descs) - goto err_out; - lp->tx_descs_tail_addr = lp->tx_descs_addr + - sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; - - /* Initialize RX Ring Descriptors and buffers */ - for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { - dwceqos_alloc_rxring_desc(lp, i); - if (!(lp->rx_skb[lp->rx_cur].skb)) - goto err_out; - } - - /* Initialize TX Descriptors */ - for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { - lp->tx_descs[i].des0 = 0; - lp->tx_descs[i].des1 = 0; - lp->tx_descs[i].des2 = 0; - lp->tx_descs[i].des3 = 0; - } - - /* Make descriptor writes visible to the DMA. 
*/ - wmb(); - - return 0; - -err_out: - dwceqos_descriptor_free(lp); - return -ENOMEM; -} - -static int dwceqos_packet_avail(struct net_local *lp) -{ - return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); -} - -static void dwceqos_get_hwfeatures(struct net_local *lp) -{ - lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); - lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); - lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); -} - -static void dwceqos_dma_enable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_txirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_TIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_enable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval |= DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_dma_disable_rxirq(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); - regval &= ~DWCEQOS_DMA_CH0_IE_RIE; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_enable_mmc_interrupt(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); -} - -static int dwceqos_mii_init(struct net_local *lp) -{ - int ret = -ENXIO; - struct resource res; - struct device_node *mdionode; - - mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); - - if (!mdionode) - return 0; - - lp->mii_bus = mdiobus_alloc(); - if (!lp->mii_bus) { - ret = -ENOMEM; - goto err_out; - } - - lp->mii_bus->name = "DWCEQOS MII bus"; - lp->mii_bus->read = &dwceqos_mdio_read; - lp->mii_bus->write = &dwceqos_mdio_write; - lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->pdev->dev; - - of_address_to_resource(lp->pdev->dev.of_node, 0, &res); - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", - (unsigned long long)res.start); - if (of_mdiobus_register(lp->mii_bus, mdionode)) - goto err_out_free_mdiobus; - - return 0; - -err_out_free_mdiobus: - mdiobus_free(lp->mii_bus); -err_out: - of_node_put(mdionode); - return ret; -} - -/* DMA reset. When issued also resets all MTL and MAC registers as well */ -static void dwceqos_reset_hw(struct net_local *lp) -{ - /* Wait (at most) 0.5 seconds for DMA reset*/ - int i = 5000; - u32 reg; - - /* Force gigabit to guarantee a TX clock for GMII. 
*/ - reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); - reg |= DWCEQOS_MAC_CFG_DM; - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); - - do { - udelay(100); - i--; - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); - } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); - /* We might experience a timeout if the chip clock mux is broken */ - if (!i) - netdev_err(lp->ndev, "DMA reset timed out!\n"); -} - -static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) -{ - if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { - netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } - if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { - netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? - "read" : "write", - dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? - "descr" : "data", - dma_status); - - print_status(lp); - } -} - -static void dwceqos_mmc_interrupt(struct net_local *lp) -{ - unsigned long flags; - - spin_lock_irqsave(&lp->stats_lock, flags); - - /* A latched mmc interrupt can not be masked, we must read - * all the counters with an interrupt pending. - */ - dwceqos_read_mmc_counters(lp, - dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), - dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); - - spin_unlock_irqrestore(&lp->stats_lock, flags); -} - -static void dwceqos_mac_interrupt(struct net_local *lp) -{ - u32 cause; - - cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); - - if (cause & DWCEQOS_MAC_IS_MMC_INT) - dwceqos_mmc_interrupt(lp); -} - -static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = dev_id; - struct net_local *lp = netdev_priv(ndev); - - u32 cause; - u32 dma_status; - irqreturn_t ret = IRQ_NONE; - - cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); - /* DMA Channel 0 Interrupt */ - if (cause & DWCEQOS_DMA_IS_DC0IS) { - dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); - - /* Transmit Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { - tasklet_schedule(&lp->tx_bdreclaim_tasklet); - dwceqos_dma_disable_txirq(lp); - } - - /* Receive Interrupt */ - if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { - /* Disable RX IRQs */ - dwceqos_dma_disable_rxirq(lp); - napi_schedule(&lp->napi); - } - - /* Fatal Bus Error interrupt */ - if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { - dwceqos_fatal_bus_error(lp, dma_status); - - /* errata 9000831707 */ - dma_status |= DWCEQOS_DMA_CH0_IS_TEB | - DWCEQOS_DMA_CH0_IS_REB; - } - - /* Ack all DMA Channel 0 IRQs */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MTLIS) { - u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); - - dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); - ret = IRQ_HANDLED; - } - - if (cause & DWCEQOS_DMA_IS_MACIS) { - dwceqos_mac_interrupt(lp); - ret = IRQ_HANDLED; - } - return ret; -} - -static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); - if (enable) - regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - else - regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); - - 
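Aside: dwceqos_set_rx_flowcontrol() here and dwceqos_set_tx_flowcontrol() below repeat the same lock, read, modify, write, unlock sequence. A sketch of the single helper the pattern reduces to, reusing the file's own dwceqos_read()/dwceqos_write() macros and hw_lock (my_rmw is an illustrative name, not something the driver defined):

static void my_rmw(struct net_local *lp, u32 reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 regval;

	spin_lock_irqsave(&lp->hw_lock, flags);
	regval = dwceqos_read(lp, reg);
	regval = (regval & ~clear) | set;	/* clear first, then set */
	dwceqos_write(lp, reg, regval);
	spin_unlock_irqrestore(&lp->hw_lock, flags);
}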
spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) -{ - u32 regval; - unsigned long flags; - - spin_lock_irqsave(&lp->hw_lock, flags); - - /* MTL flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - if (enable) - regval |= DWCEQOS_MTL_RXQ_EHFC; - else - regval &= ~DWCEQOS_MTL_RXQ_EHFC; - - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - /* MAC flow control */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); - if (enable) - regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; - else - regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_flow_control(struct net_local *lp) -{ - u32 regval; - unsigned long flags; - int RQS, RFD, RFA; - - spin_lock_irqsave(&lp->hw_lock, flags); - - regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); - - /* The queue size is in units of 256 bytes. We want 512 bytes units for - * the threshold fields. - */ - RQS = ((regval >> 20) & 0x3FF) + 1; - RQS /= 2; - - /* The thresholds are relative to a full queue, with a bias - * of 1 KiByte below full. - */ - RFD = RQS / 2 - 2; - RFA = RQS / 8 - 2; - - regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); - - if (RFD >= 0 && RFA >= 0) { - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - } else { - netdev_warn(lp->ndev, - "FIFO too small for flow control."); - } - - regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | - DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; - - dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); - - spin_unlock_irqrestore(&lp->hw_lock, flags); -} - -static void dwceqos_configure_clock(struct net_local *lp) -{ - unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; - - BUG_ON(!rate_mhz); - - dwceqos_write(lp, - REG_DWCEQOS_MAC_1US_TIC_COUNTER, - DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); -} - -static void dwceqos_configure_bus(struct net_local *lp) -{ - u32 sysbus_reg; - - /* N.B. We do not support the Fixed Burst mode because it - * opens a race window by making HW access to DMA descriptors - * non-atomic. - */ - - sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; - - if (lp->bus_cfg.en_lpi) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; - - if (lp->bus_cfg.burst_map) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - lp->bus_cfg.burst_map); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( - DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); - - if (lp->bus_cfg.read_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - lp->bus_cfg.read_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); - - if (lp->bus_cfg.write_requests) - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - lp->bus_cfg.write_requests - 1); - else - sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( - DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); - - if (netif_msg_hw(lp)) - netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); - - dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); -} - -static void dwceqos_init_hw(struct net_local *lp) -{ - struct net_device *ndev = lp->ndev; - u32 regval; - u32 buswidth; - u32 dma_skip; - - /* Software reset */ - dwceqos_reset_hw(lp); - - dwceqos_configure_bus(lp); - - /* Probe data bus width, 32/64/128 bits. 
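Aside: a worked example of the threshold math in dwceqos_configure_flow_control() above, for a hypothetical 4 KiB RX FIFO. The RQS field encodes the queue size in 256-byte units minus one, and the thresholds are then expressed in 512-byte units:

	RQS  = ((regval >> 20) & 0x3FF) + 1  =  16	/* 4096 / 256 */
	RQS /= 2                             =   8	/* now 512-byte units */
	RFD  = RQS / 2 - 2                   =   2	/* deactivate: half full, ~1 KiB bias */
	RFA  = RQS / 8 - 2                   =  -1	/* activate: negative! */

A negative threshold fails the RFD/RFA >= 0 guard and trips the "FIFO too small for flow control" warning, so a FIFO this small simply runs without hardware flow control.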
*/ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); - buswidth = (regval ^ 0xF) + 1; - - /* Cache-align dma descriptors. */ - dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, - DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | - DWCEQOS_DMA_CH_CTRL_PBLX8); - - /* Initialize DMA Channel 0 */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, - (u32)lp->tx_descs_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, - (u32)lp->rx_descs_addr); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - if (lp->bus_cfg.tx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - /* Enable TSO if the HW support it */ - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - regval |= DWCEQOS_DMA_CH_TX_TSE; - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); - - if (lp->bus_cfg.rx_pbl) - regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); - else - regval = DWCEQOS_DMA_CH_CTRL_PBL(2); - - regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - regval |= DWCEQOS_DMA_CH_CTRL_START; - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); - - /* Initialize MTL Queues */ - regval = DWCEQOS_MTL_SCHALG_STRICT; - dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); - - regval = DWCEQOS_MTL_TXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | - DWCEQOS_MTL_TXQ_TTC512; - dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); - - regval = DWCEQOS_MTL_RXQ_SIZE( - DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | - DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; - dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); - - dwceqos_configure_flow_control(lp); - - /* Initialize MAC */ - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - lp->eee_enabled = 0; - - dwceqos_configure_clock(lp); - - /* MMC counters */ - - /* probe implemented counters */ - dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); - dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); - lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); - lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); - - dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | - DWCEQOS_MMC_CTRL_RSTONRD); - dwceqos_enable_mmc_interrupt(lp); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); - dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); - - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | - DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - /* Start TX DMA */ - regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, - regval | DWCEQOS_DMA_CH_CTRL_START); - - /* Enable MAC TX/RX */ - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); - dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, - regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); - - lp->phy_defer = false; - mutex_lock(&ndev->phydev->lock); - phy_read_status(ndev->phydev); - dwceqos_adjust_link(lp->ndev); - mutex_unlock(&ndev->phydev->lock); -} - -static void dwceqos_tx_reclaim(unsigned long data) -{ - struct net_device *ndev = 
(struct net_device *)data; - struct net_local *lp = netdev_priv(ndev); - unsigned int tx_bytes = 0; - unsigned int tx_packets = 0; - - spin_lock(&lp->tx_lock); - - while (lp->tx_free < DWCEQOS_TX_DCNT) { - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; - struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; - - /* Descriptor still being held by DMA ? */ - if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) - break; - - if (rd->mapping) - dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, - DMA_TO_DEVICE); - - if (unlikely(rd->skb)) { - ++tx_packets; - tx_bytes += rd->skb->len; - dev_consume_skb_any(rd->skb); - } - - rd->skb = NULL; - rd->mapping = 0; - lp->tx_free++; - lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; - - if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && - (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { - if (netif_msg_tx_err(lp)) - netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", - dd->des3); - if (netif_msg_hw(lp)) - print_status(lp); - } - } - spin_unlock(&lp->tx_lock); - - netdev_completed_queue(ndev, tx_packets, tx_bytes); - - dwceqos_dma_enable_txirq(lp); - netif_wake_queue(ndev); -} - -static int dwceqos_rx(struct net_local *lp, int budget) -{ - struct sk_buff *skb; - u32 tot_size = 0; - unsigned int n_packets = 0; - unsigned int n_descs = 0; - u32 len; - - struct dwceqos_dma_desc *dd; - struct sk_buff *new_skb; - dma_addr_t new_skb_baddr = 0; - - while (n_descs < budget) { - if (!dwceqos_packet_avail(lp)) - break; - - new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); - if (!new_skb) { - netdev_err(lp->ndev, "no memory for new sk_buff\n"); - break; - } - - /* Get dma handle of skb->data */ - new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, - new_skb->data, - DWCEQOS_RX_BUF_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { - netdev_err(lp->ndev, "DMA map error\n"); - dev_kfree_skb(new_skb); - break; - } - - /* Read descriptor data after reading owner bit. */ - dma_rmb(); - - dd = &lp->rx_descs[lp->rx_cur]; - len = DWCEQOS_DMA_RDES3_PL(dd->des3); - skb = lp->rx_skb[lp->rx_cur].skb; - - /* Unmap old buffer */ - dma_unmap_single(lp->ndev->dev.parent, - lp->rx_skb[lp->rx_cur].mapping, - lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); - - /* Discard packet on reception error or bad checksum */ - if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || - (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { - dev_kfree_skb(skb); - skb = NULL; - } else { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, lp->ndev); - switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { - case DWCEQOS_DMA_RDES1_PT_UDP: - case DWCEQOS_DMA_RDES1_PT_TCP: - case DWCEQOS_DMA_RDES1_PT_ICMP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } - } - - if (unlikely(!skb)) { - if (netif_msg_rx_err(lp)) - netdev_dbg(lp->ndev, "rx error: des3=%X\n", - lp->rx_descs[lp->rx_cur].des3); - } else { - tot_size += skb->len; - n_packets++; - - netif_receive_skb(skb); - } - - lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; - lp->rx_descs[lp->rx_cur].des1 = 0; - lp->rx_descs[lp->rx_cur].des2 = 0; - /* The DMA must observe des0/1/2 written before des3. 
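The ordering rule stated in this comment is the general publish protocol for device-owned descriptors. A minimal standalone sketch of the idea, with __sync_synchronize() standing in for the kernel's wmb() and an abbreviated, hypothetical descriptor layout:

    struct example_desc {
        unsigned int des0, des1, des2, des3;
    };

    void publish_rx_desc(volatile struct example_desc *dd,
                         unsigned int buf_addr, unsigned int own_flags)
    {
        /* Fill the payload fields first... */
        dd->des0 = buf_addr;   /* new buffer bus address */
        dd->des1 = 0;
        dd->des2 = 0;

        /* ...make them visible... */
        __sync_synchronize();

        /* ...and only then hand the descriptor to the device, so it
         * can never observe a half-initialized entry.
         */
        dd->des3 = own_flags;  /* OWN | INTE | BUF1V in the code above */
    }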
*/ - wmb(); - lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | - DWCEQOS_DMA_RDES3_OWN | - DWCEQOS_DMA_RDES3_BUF1V; - - lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; - lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; - lp->rx_skb[lp->rx_cur].skb = new_skb; - - n_descs++; - lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; - } - - /* Make sure any ownership update is written to the descriptors before - * DMA wakeup. - */ - wmb(); - - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); - /* Wake up RX by writing tail pointer */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, - lp->rx_descs_tail_addr); - - return n_descs; -} - -static int dwceqos_rx_poll(struct napi_struct *napi, int budget) -{ - struct net_local *lp = container_of(napi, struct net_local, napi); - int work_done = 0; - - work_done = dwceqos_rx(lp, budget - work_done); - - if (!dwceqos_packet_avail(lp) && work_done < budget) { - napi_complete(napi); - dwceqos_dma_enable_rxirq(lp); - } else { - work_done = budget; - } - - return work_done; -} - -/* Reinitialize function if a TX timed out */ -static void dwceqos_reinit_for_txtimeout(struct work_struct *data) -{ - struct net_local *lp = container_of(data, struct net_local, - txtimeout_reinit); - - netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", - DWCEQOS_TX_TIMEOUT); - - if (netif_msg_hw(lp)) - print_status(lp); - - rtnl_lock(); - dwceqos_stop(lp->ndev); - dwceqos_open(lp->ndev); - rtnl_unlock(); -} - -/* DT Probing function called by main probe */ -static inline int dwceqos_probe_config_dt(struct platform_device *pdev) -{ - struct net_device *ndev; - struct net_local *lp; - const void *mac_address; - struct dwceqos_bus_cfg *bus_cfg; - struct device_node *np = pdev->dev.of_node; - - ndev = platform_get_drvdata(pdev); - lp = netdev_priv(ndev); - bus_cfg = &lp->bus_cfg; - - /* Set the MAC address. */ - mac_address = of_get_mac_address(pdev->dev.of_node); - if (mac_address) - ether_addr_copy(ndev->dev_addr, mac_address); - - /* These are all optional parameters */ - lp->en_tx_lpi_clockgating = of_property_read_bool(np, - "snps,en-tx-lpi-clockgating"); - bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); - of_property_read_u32(np, "snps,write-requests", - &bus_cfg->write_requests); - of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); - of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); - of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); - of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); - - netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n", - bus_cfg->en_lpi, - bus_cfg->write_requests, - bus_cfg->read_requests, - bus_cfg->burst_map, - bus_cfg->rx_pbl, - bus_cfg->tx_pbl); - - return 0; -} - -static int dwceqos_open(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - int res; - - dwceqos_reset_state(lp); - res = dwceqos_descriptor_init(lp); - if (res) { - netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); - return res; - } - netdev_reset_queue(ndev); - - /* The dwceqos reset state machine requires all phy clocks to complete, - * hence the unusual init order with phy_start first. - */ - lp->phy_defer = true; - phy_start(ndev->phydev); - dwceqos_init_hw(lp); - napi_enable(&lp->napi); - - netif_start_queue(ndev); - tasklet_enable(&lp->tx_bdreclaim_tasklet); - - /* Enable Interrupts -- do this only after we enable NAPI and the - * tasklet. 
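dwceqos_rx_poll above follows the standard NAPI contract: leave polling mode and unmask the device interrupt only when the ring drained with budget to spare, otherwise report the full budget to stay scheduled. A driver-agnostic sketch of that shape (all names hypothetical):

    int example_poll(int budget, int (*rx)(int), int (*more)(void),
                     void (*unmask_irq)(void))
    {
        int done = rx(budget);

        if (done < budget && !more()) {
            /* the real driver calls napi_complete() here */
            unmask_irq();
            return done;
        }

        /* stay in polling mode; report the full budget consumed */
        return budget;
    }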
- */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - DWCEQOS_DMA_CH0_IE_FBEE); - - return 0; -} - -static bool dweqos_is_tx_dma_suspended(struct net_local *lp) -{ - u32 reg; - - reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); - reg = DMA_GET_TX_STATE_CH0(reg); - - return reg == DMA_TX_CH_SUSPENDED; -} - -static void dwceqos_drain_dma(struct net_local *lp) -{ - /* Wait for all pending TX buffers to be sent. Upper limit based - * on max frame size on a 10 Mbit link. - */ - size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; - - while (!dweqos_is_tx_dma_suspended(lp) && limit--) - usleep_range(100, 200); -} - -static int dwceqos_stop(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - tasklet_disable(&lp->tx_bdreclaim_tasklet); - napi_disable(&lp->napi); - - /* Stop all tx before we drain the tx dma. */ - netif_tx_lock_bh(lp->ndev); - netif_stop_queue(ndev); - netif_tx_unlock_bh(lp->ndev); - - dwceqos_drain_dma(lp); - dwceqos_reset_hw(lp); - phy_stop(ndev->phydev); - - dwceqos_descriptor_free(lp); - - return 0; -} - -static void dwceqos_dmadesc_set_ctx(struct net_local *lp, - unsigned short gso_size) -{ - struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; - - dd->des0 = 0; - dd->des1 = 0; - dd->des2 = gso_size; - dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; - - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; -} - -static void dwceqos_tx_poll_demand(struct net_local *lp) -{ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, - lp->tx_descs_tail_addr); -} - -struct dwceqos_tx { - size_t nr_descriptors; - size_t initial_descriptor; - size_t last_descriptor; - size_t prev_gso_size; - size_t network_header_len; -}; - -static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - size_t n = 1; - size_t i; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) - ++n; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / - BYTES_PER_DMA_DESC; - } - - tx->nr_descriptors = n; - tx->initial_descriptor = lp->tx_next; - tx->last_descriptor = lp->tx_next; - tx->prev_gso_size = lp->gso_size; - - tx->network_header_len = skb_transport_offset(skb); - if (skb_is_gso(skb)) - tx->network_header_len += tcp_hdrlen(skb); -} - -static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd; - struct dwceqos_dma_desc *dd; - size_t payload_len; - dma_addr_t dma_handle; - - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { - dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); - lp->gso_size = skb_shinfo(skb)->gso_size; - } - - dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "TX DMA Mapping error\n"); - return -ENOMEM; - } - - rd = &lp->tx_skb[lp->tx_next]; - dd = &lp->tx_descs[lp->tx_next]; - - rd->skb = NULL; - rd->len = skb_headlen(skb); - rd->mapping = dma_handle; - - /* Set up DMA Descriptor */ - dd->des0 = dma_handle; - - if (skb_is_gso(skb)) { - payload_len = skb_headlen(skb) - tx->network_header_len; - - if (payload_len) - dd->des1 = dma_handle + tx->network_header_len; - dd->des2 = tx->network_header_len | - DWCEQOS_DMA_DES2_B2L(payload_len); - dd->des3 = DWCEQOS_DMA_TDES3_TSE | - 
DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | - (skb->len - tx->network_header_len); - } else { - dd->des1 = 0; - dd->des2 = skb_headlen(skb); - dd->des3 = skb->len; - - switch (skb->ip_summed) { - case CHECKSUM_PARTIAL: - dd->des3 |= DWCEQOS_DMA_TDES3_CA; - case CHECKSUM_NONE: - case CHECKSUM_UNNECESSARY: - case CHECKSUM_COMPLETE: - default: - break; - } - } - - dd->des3 |= DWCEQOS_DMA_TDES3_FD; - if (lp->tx_next != tx->initial_descriptor) - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - - return 0; -} - -static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - struct ring_desc *rd = NULL; - struct dwceqos_dma_desc *dd; - dma_addr_t dma_handle; - size_t i; - - /* Setup more ring and DMA descriptor if the packet is fragmented */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - size_t frag_size; - size_t consumed_size; - - /* Map DMA Area */ - dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { - netdev_err(lp->ndev, "DMA Mapping error\n"); - return -ENOMEM; - } - - /* order-3 fragments span more than one descriptor. */ - frag_size = skb_frag_size(frag); - consumed_size = 0; - while (consumed_size < frag_size) { - size_t dma_size = min_t(size_t, 16376, - frag_size - consumed_size); - - rd = &lp->tx_skb[lp->tx_next]; - memset(rd, 0, sizeof(*rd)); - - dd = &lp->tx_descs[lp->tx_next]; - - /* Set DMA Descriptor fields */ - dd->des0 = dma_handle + consumed_size; - dd->des1 = 0; - dd->des2 = dma_size; - - if (skb_is_gso(skb)) - dd->des3 = (skb->len - tx->network_header_len); - else - dd->des3 = skb->len; - - dd->des3 |= DWCEQOS_DMA_TDES3_OWN; - - tx->last_descriptor = lp->tx_next; - lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; - consumed_size += dma_size; - } - - rd->len = skb_frag_size(frag); - rd->mapping = dma_handle; - } - - return 0; -} - -static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, - struct dwceqos_tx *tx) -{ - lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; - lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; - - lp->tx_skb[tx->last_descriptor].skb = skb; - - /* Make all descriptor updates visible to the DMA before setting the - * owner bit. - */ - wmb(); - - lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; - - /* Make the owner bit visible before TX wakeup. 
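dwceqos_tx_finalize below depends on two barriers: one before the first descriptor's OWN bit is set (descriptors 2..n were already marked OWN as they were built), and one before the doorbell write. A compact sketch of that publish sequence, again with __sync_synchronize() standing in for wmb() and hypothetical parameters:

    void publish_tx_chain(volatile unsigned int *first_des3,
                          unsigned int own_bit,
                          void (*ring_doorbell)(void))
    {
        /* 1: all descriptor contents, including OWN on descriptors
         * 2..n, must be visible before OWN is set on the first one.
         */
        __sync_synchronize();
        *first_des3 |= own_bit;

        /* 2: the first OWN must be visible before the tail-pointer
         * write that wakes the DMA engine.
         */
        __sync_synchronize();
        ring_doorbell();   /* write the TXDESC_TAIL register */
    }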
*/ - wmb(); - - dwceqos_tx_poll_demand(lp); -} - -static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) -{ - size_t i = tx->initial_descriptor; - - while (i != lp->tx_next) { - if (lp->tx_skb[i].mapping) - dma_unmap_single(lp->ndev->dev.parent, - lp->tx_skb[i].mapping, - lp->tx_skb[i].len, - DMA_TO_DEVICE); - - lp->tx_skb[i].mapping = 0; - lp->tx_skb[i].skb = NULL; - - memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); - - i = (i + 1) % DWCEQOS_TX_DCNT; - } - - lp->tx_next = tx->initial_descriptor; - lp->gso_size = tx->prev_gso_size; -} - -static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_tx trans; - int err; - - dwceqos_tx_prepare(skb, lp, &trans); - if (lp->tx_free < trans.nr_descriptors) { - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; - } - - err = dwceqos_tx_linear(skb, lp, &trans); - if (err) - goto tx_error; - - err = dwceqos_tx_frags(skb, lp, &trans); - if (err) - goto tx_error; - - WARN_ON(lp->tx_next != - ((trans.initial_descriptor + trans.nr_descriptors) % - DWCEQOS_TX_DCNT)); - - spin_lock_bh(&lp->tx_lock); - lp->tx_free -= trans.nr_descriptors; - dwceqos_tx_finalize(skb, lp, &trans); - netdev_sent_queue(ndev, skb->len); - spin_unlock_bh(&lp->tx_lock); - - netif_trans_update(ndev); - return 0; - -tx_error: - dwceqos_tx_rollback(lp, &trans); - dev_kfree_skb_any(skb); - return 0; -} - -/* Set MAC address and then update HW accordingly */ -static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) -{ - struct net_local *lp = netdev_priv(ndev); - struct sockaddr *hwaddr = (struct sockaddr *)addr; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(hwaddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - return 0; -} - -static void dwceqos_tx_timeout(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - - queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); -} - -static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, - unsigned int reg_n) -{ - unsigned long data; - - data = (addr[5] << 8) | addr[4]; - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), - data | DWCEQOS_MAC_MAC_ADDR_HI_EN); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); -} - -static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) -{ - /* Do not disable MAC address 0 */ - if (reg_n != 0) - dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); -} - -static void dwceqos_set_rx_mode(struct net_device *ndev) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval = 0; - u32 mc_filter[2]; - int reg = 1; - struct netdev_hw_addr *ha; - unsigned int max_mac_addr; - - max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); - - if (ndev->flags & IFF_PROMISC) { - regval = DWCEQOS_MAC_PKT_FILT_PR; - } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || - (ndev->flags & IFF_ALLMULTI))) { - regval = DWCEQOS_MAC_PKT_FILT_PM; - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); - } else if (!netdev_mc_empty(ndev)) { - regval = DWCEQOS_MAC_PKT_FILT_HMC; - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, ndev) { - /* The upper 6 bits of the calculated CRC are used to - * index the contens of the hash table - */ - int bit_nr = 
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; - /* The most significant bit determines the register - * to use (H/L) while the other 5 bits determine - * the bit within the register. - */ - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); - dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); - } - if (netdev_uc_count(ndev) > max_mac_addr) { - regval |= DWCEQOS_MAC_PKT_FILT_PR; - } else { - netdev_for_each_uc_addr(ha, ndev) { - dwceqos_set_umac_addr(lp, ha->addr, reg); - reg++; - } - for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) - dwceqos_disable_umac_addr(lp, reg); - } - dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void dwceqos_poll_controller(struct net_device *ndev) -{ - disable_irq(ndev->irq); - dwceqos_interrupt(ndev->irq, ndev); - enable_irq(ndev->irq); -} -#endif - -static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, - u32 tx_mask) -{ - if (tx_mask & BIT(27)) - lp->mmc_counters.txlpitranscntr += - dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); - if (tx_mask & BIT(26)) - lp->mmc_counters.txpiuscntr += - dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); - if (tx_mask & BIT(25)) - lp->mmc_counters.txoversize_g += - dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); - if (tx_mask & BIT(24)) - lp->mmc_counters.txvlanpackets_g += - dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); - if (tx_mask & BIT(23)) - lp->mmc_counters.txpausepackets += - dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); - if (tx_mask & BIT(22)) - lp->mmc_counters.txexcessdef += - dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); - if (tx_mask & BIT(21)) - lp->mmc_counters.txpacketcount_g += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); - if (tx_mask & BIT(20)) - lp->mmc_counters.txoctetcount_g += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); - if (tx_mask & BIT(19)) - lp->mmc_counters.txcarriererror += - dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); - if (tx_mask & BIT(18)) - lp->mmc_counters.txexcesscol += - dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); - if (tx_mask & BIT(17)) - lp->mmc_counters.txlatecol += - dwceqos_read(lp, DWC_MMC_TXLATECOL); - if (tx_mask & BIT(16)) - lp->mmc_counters.txdeferred += - dwceqos_read(lp, DWC_MMC_TXDEFERRED); - if (tx_mask & BIT(15)) - lp->mmc_counters.txmulticol_g += - dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); - if (tx_mask & BIT(14)) - lp->mmc_counters.txsinglecol_g += - dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); - if (tx_mask & BIT(13)) - lp->mmc_counters.txunderflowerror += - dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); - if (tx_mask & BIT(12)) - lp->mmc_counters.txbroadcastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); - if (tx_mask & BIT(11)) - lp->mmc_counters.txmulticastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); - if (tx_mask & BIT(10)) - lp->mmc_counters.txunicastpackets_gb += - dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); - if (tx_mask & BIT(9)) - lp->mmc_counters.tx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); - if (tx_mask & BIT(8)) - lp->mmc_counters.tx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); - if (tx_mask & BIT(7)) - lp->mmc_counters.tx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); - if (tx_mask & BIT(6)) - lp->mmc_counters.tx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); - if (tx_mask & BIT(5)) - lp->mmc_counters.tx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); - if (tx_mask & BIT(4)) - lp->mmc_counters.tx64octets_gb 
+= - dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); - if (tx_mask & BIT(3)) - lp->mmc_counters.txmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); - if (tx_mask & BIT(2)) - lp->mmc_counters.txbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); - if (tx_mask & BIT(1)) - lp->mmc_counters.txpacketcount_gb += - dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); - if (tx_mask & BIT(0)) - lp->mmc_counters.txoctetcount_gb += - dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); - - if (rx_mask & BIT(27)) - lp->mmc_counters.rxlpitranscntr += - dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); - if (rx_mask & BIT(26)) - lp->mmc_counters.rxlpiuscntr += - dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); - if (rx_mask & BIT(25)) - lp->mmc_counters.rxctrlpackets_g += - dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); - if (rx_mask & BIT(24)) - lp->mmc_counters.rxrcverror += - dwceqos_read(lp, DWC_MMC_RXRCVERROR); - if (rx_mask & BIT(23)) - lp->mmc_counters.rxwatchdog += - dwceqos_read(lp, DWC_MMC_RXWATCHDOG); - if (rx_mask & BIT(22)) - lp->mmc_counters.rxvlanpackets_gb += - dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); - if (rx_mask & BIT(21)) - lp->mmc_counters.rxfifooverflow += - dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); - if (rx_mask & BIT(20)) - lp->mmc_counters.rxpausepackets += - dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); - if (rx_mask & BIT(19)) - lp->mmc_counters.rxoutofrangetype += - dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); - if (rx_mask & BIT(18)) - lp->mmc_counters.rxlengtherror += - dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); - if (rx_mask & BIT(17)) - lp->mmc_counters.rxunicastpackets_g += - dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); - if (rx_mask & BIT(16)) - lp->mmc_counters.rx1024tomaxoctets_gb += - dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); - if (rx_mask & BIT(15)) - lp->mmc_counters.rx512to1023octets_gb += - dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); - if (rx_mask & BIT(14)) - lp->mmc_counters.rx256to511octets_gb += - dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); - if (rx_mask & BIT(13)) - lp->mmc_counters.rx128to255octets_gb += - dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); - if (rx_mask & BIT(12)) - lp->mmc_counters.rx65to127octets_gb += - dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); - if (rx_mask & BIT(11)) - lp->mmc_counters.rx64octets_gb += - dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); - if (rx_mask & BIT(10)) - lp->mmc_counters.rxoversize_g += - dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); - if (rx_mask & BIT(9)) - lp->mmc_counters.rxundersize_g += - dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); - if (rx_mask & BIT(8)) - lp->mmc_counters.rxjabbererror += - dwceqos_read(lp, DWC_MMC_RXJABBERERROR); - if (rx_mask & BIT(7)) - lp->mmc_counters.rxrunterror += - dwceqos_read(lp, DWC_MMC_RXRUNTERROR); - if (rx_mask & BIT(6)) - lp->mmc_counters.rxalignmenterror += - dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); - if (rx_mask & BIT(5)) - lp->mmc_counters.rxcrcerror += - dwceqos_read(lp, DWC_MMC_RXCRCERROR); - if (rx_mask & BIT(4)) - lp->mmc_counters.rxmulticastpackets_g += - dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); - if (rx_mask & BIT(3)) - lp->mmc_counters.rxbroadcastpackets_g += - dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); - if (rx_mask & BIT(2)) - lp->mmc_counters.rxoctetcount_g += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); - if (rx_mask & BIT(1)) - lp->mmc_counters.rxoctetcount_gb += - dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); - if (rx_mask & BIT(0)) - lp->mmc_counters.rxpacketcount_gb += - dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); -} - -static struct rtnl_link_stats64* 
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) -{ - unsigned long flags; - struct net_local *lp = netdev_priv(ndev); - struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - s->rx_packets = hwstats->rxpacketcount_gb; - s->rx_bytes = hwstats->rxoctetcount_gb; - s->rx_errors = hwstats->rxpacketcount_gb - - hwstats->rxbroadcastpackets_g - - hwstats->rxmulticastpackets_g - - hwstats->rxunicastpackets_g; - s->multicast = hwstats->rxmulticastpackets_g; - s->rx_length_errors = hwstats->rxlengtherror; - s->rx_crc_errors = hwstats->rxcrcerror; - s->rx_fifo_errors = hwstats->rxfifooverflow; - - s->tx_packets = hwstats->txpacketcount_gb; - s->tx_bytes = hwstats->txoctetcount_gb; - - if (lp->mmc_tx_counters_mask & BIT(21)) - s->tx_errors = hwstats->txpacketcount_gb - - hwstats->txpacketcount_g; - else - s->tx_errors = hwstats->txunderflowerror + - hwstats->txcarriererror; - - return s; -} - -static void -dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) -{ - const struct net_local *lp = netdev_priv(ndev); - - strcpy(ed->driver, lp->pdev->dev.driver->name); - strcpy(ed->version, DRIVER_VERSION); -} - -static void dwceqos_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - const struct net_local *lp = netdev_priv(ndev); - - pp->autoneg = lp->flowcontrol.autoneg; - pp->tx_pause = lp->flowcontrol.tx; - pp->rx_pause = lp->flowcontrol.rx; -} - -static int dwceqos_set_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pp) -{ - struct net_local *lp = netdev_priv(ndev); - int ret = 0; - - lp->flowcontrol.autoneg = pp->autoneg; - if (pp->autoneg) { - ndev->phydev->advertising |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Asym_Pause; - } else { - ndev->phydev->advertising &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; - lp->flowcontrol.rx = pp->rx_pause; - lp->flowcontrol.tx = pp->tx_pause; - } - - if (netif_running(ndev)) - ret = phy_start_aneg(ndev->phydev); - - return ret; -} - -static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, - u8 *data) -{ - size_t i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, dwceqos_ethtool_stats[i].stat_name, - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } -} - -static void dwceqos_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct net_local *lp = netdev_priv(ndev); - unsigned long flags; - size_t i; - u8 *mmcstat = (u8 *)&lp->mmc_counters; - - spin_lock_irqsave(&lp->stats_lock, flags); - dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, - lp->mmc_tx_counters_mask); - spin_unlock_irqrestore(&lp->stats_lock, flags); - - for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { - memcpy(data, - mmcstat + dwceqos_ethtool_stats[i].offset, - sizeof(u64)); - data++; - } -} - -static int dwceqos_get_sset_count(struct net_device *ndev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(dwceqos_ethtool_stats); - - return -EOPNOTSUPP; -} - -static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *space) -{ - const struct net_local *lp = netdev_priv(dev); - u32 *reg_space = (u32 *)space; - int reg_offset; - int reg_ix = 0; - - /* MAC registers */ - for (reg_offset = 
START_MAC_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - /* MTL registers */ - for (reg_offset = START_MTL_REG_OFFSET; - reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - /* DMA registers */ - for (reg_offset = START_DMA_REG_OFFSET; - reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { - reg_space[reg_ix] = dwceqos_read(lp, reg_offset); - reg_ix++; - } - - BUG_ON(4 * reg_ix > REG_SPACE_SIZE); -} - -static int dwceqos_get_regs_len(struct net_device *dev) -{ - return REG_SPACE_SIZE; -} - -static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; -} - -static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) -{ - return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; -} - -static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 lpi_status; - u32 lpi_enabled; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - edata->eee_active = lp->eee_active; - edata->eee_enabled = lp->eee_enabled; - edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); - lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); - edata->tx_lpi_enabled = lpi_enabled; - - if (netif_msg_hw(lp)) { - u32 regval; - - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - - netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", - dwceqos_get_rx_lpi_state(regval), - dwceqos_get_tx_lpi_state(regval)); - } - - return phy_ethtool_get_eee(ndev->phydev, edata); -} - -static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct net_local *lp = netdev_priv(ndev); - u32 regval; - unsigned long flags; - - if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) - return -EOPNOTSUPP; - - if (edata->eee_enabled && !lp->eee_active) - return -EOPNOTSUPP; - - if (edata->tx_lpi_enabled) { - if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || - edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) - return -EINVAL; - } - - lp->eee_enabled = edata->eee_enabled; - - if (edata->eee_enabled && edata->tx_lpi_enabled) { - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, - edata->tx_lpi_timer); - - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; - if (lp->en_tx_lpi_clockgating) - regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } else { - spin_lock_irqsave(&lp->hw_lock, flags); - regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); - regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; - dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); - spin_unlock_irqrestore(&lp->hw_lock, flags); - } - - return phy_ethtool_set_eee(ndev->phydev, edata); -} - -static u32 dwceqos_get_msglevel(struct net_device *ndev) -{ - const struct net_local *lp = netdev_priv(ndev); - - return lp->msg_enable; -} - -static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) -{ - struct net_local *lp = netdev_priv(ndev); - - lp->msg_enable = msglevel; -} - -static const struct ethtool_ops dwceqos_ethtool_ops = { - .get_drvinfo = dwceqos_get_drvinfo, - .get_link = 
ethtool_op_get_link, - .get_pauseparam = dwceqos_get_pauseparam, - .set_pauseparam = dwceqos_set_pauseparam, - .get_strings = dwceqos_get_strings, - .get_ethtool_stats = dwceqos_get_ethtool_stats, - .get_sset_count = dwceqos_get_sset_count, - .get_regs = dwceqos_get_regs, - .get_regs_len = dwceqos_get_regs_len, - .get_eee = dwceqos_get_eee, - .set_eee = dwceqos_set_eee, - .get_msglevel = dwceqos_get_msglevel, - .set_msglevel = dwceqos_set_msglevel, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static const struct net_device_ops netdev_ops = { - .ndo_open = dwceqos_open, - .ndo_stop = dwceqos_stop, - .ndo_start_xmit = dwceqos_start_xmit, - .ndo_set_rx_mode = dwceqos_set_rx_mode, - .ndo_set_mac_address = dwceqos_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = dwceqos_poll_controller, -#endif - .ndo_do_ioctl = dwceqos_ioctl, - .ndo_tx_timeout = dwceqos_tx_timeout, - .ndo_get_stats64 = dwceqos_get_stats64, -}; - -static const struct of_device_id dwceq_of_match[] = { - { .compatible = "snps,dwc-qos-ethernet-4.10", }, - {} -}; -MODULE_DEVICE_TABLE(of, dwceq_of_match); - -static int dwceqos_probe(struct platform_device *pdev) -{ - struct resource *r_mem = NULL; - struct net_device *ndev; - struct net_local *lp; - int ret = -ENXIO; - - r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r_mem) { - dev_err(&pdev->dev, "no IO resource defined.\n"); - return -ENXIO; - } - - ndev = alloc_etherdev(sizeof(*lp)); - if (!ndev) { - dev_err(&pdev->dev, "etherdev allocation failed.\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - lp = netdev_priv(ndev); - lp->ndev = ndev; - lp->pdev = pdev; - lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); - - spin_lock_init(&lp->tx_lock); - spin_lock_init(&lp->hw_lock); - spin_lock_init(&lp->stats_lock); - - lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(lp->apb_pclk)) { - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - ret = PTR_ERR(lp->apb_pclk); - goto err_out_free_netdev; - } - - ret = clk_prepare_enable(lp->apb_pclk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable APER clock.\n"); - goto err_out_free_netdev; - } - - lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); - if (IS_ERR(lp->baseaddr)) { - dev_err(&pdev->dev, "failed to map baseaddress.\n"); - ret = PTR_ERR(lp->baseaddr); - goto err_out_clk_dis_aper; - } - - ndev->irq = platform_get_irq(pdev, 0); - ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; - ndev->netdev_ops = &netdev_ops; - ndev->ethtool_ops = &dwceqos_ethtool_ops; - ndev->base_addr = r_mem->start; - - dwceqos_get_hwfeatures(lp); - dwceqos_mdio_set_csr(lp); - - ndev->hw_features = NETIF_F_SG; - - if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) - ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) - ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - - if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) - ndev->hw_features |= NETIF_F_RXCSUM; - - ndev->features = ndev->hw_features; - - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(lp->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_clk_dis_aper; - } - - ret = clk_prepare_enable(lp->phy_ref_clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_clk_dis_aper; - } - - lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, - 
"phy-handle", 0); - if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) { - ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_clk_dis_phy; - } - - lp->phy_node = of_node_get(lp->pdev->dev.of_node); - } - - ret = of_get_phy_mode(lp->pdev->dev.of_node); - if (ret < 0) { - dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_deregister_fixed_link; - } - - lp->phy_interface = ret; - - ret = dwceqos_mii_init(lp); - if (ret) { - dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_deregister_fixed_link; - } - - ret = dwceqos_mii_probe(ndev); - if (ret != 0) { - netdev_err(ndev, "mii_probe fail.\n"); - ret = -ENXIO; - goto err_out_deregister_fixed_link; - } - - dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); - - tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, - (unsigned long)ndev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - - lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, - WQ_MEM_RECLAIM, 0); - INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); - - platform_set_drvdata(pdev, ndev); - ret = dwceqos_probe_config_dt(pdev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", - ret); - goto err_out_deregister_fixed_link; - } - dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", - pdev->id, ndev->base_addr, ndev->irq); - - ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, - ndev->name, ndev); - if (ret) { - dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", - ndev->irq, ret); - goto err_out_deregister_fixed_link; - } - - if (netif_msg_probe(lp)) - netdev_dbg(ndev, "net_local@%p\n", lp); - - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_deregister_fixed_link; - } - - return 0; - -err_out_deregister_fixed_link: - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); -err_out_clk_dis_phy: - clk_disable_unprepare(lp->phy_ref_clk); -err_out_clk_dis_aper: - clk_disable_unprepare(lp->apb_pclk); -err_out_free_netdev: - of_node_put(lp->phy_node); - free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return ret; -} - -static int dwceqos_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct net_local *lp; - - if (ndev) { - lp = netdev_priv(ndev); - - if (ndev->phydev) { - phy_disconnect(ndev->phydev); - if (of_phy_is_fixed_link(pdev->dev.of_node)) - of_phy_deregister_fixed_link(pdev->dev.of_node); - } - mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); - - unregister_netdev(ndev); - - clk_disable_unprepare(lp->phy_ref_clk); - clk_disable_unprepare(lp->apb_pclk); - - free_netdev(ndev); - } - - return 0; -} - -static struct platform_driver dwceqos_driver = { - .probe = dwceqos_probe, - .remove = dwceqos_remove, - .driver = { - .name = DRIVER_NAME, - .of_match_table = dwceq_of_match, - }, -}; - -module_platform_driver(dwceqos_driver); - -MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>"); -MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>"); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index baa3e4a5731c..f864fd0663db 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ 
b/drivers/net/ethernet/tehuti/tehuti.c @@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget) * device lock and allow waiting tasks (eg rmmod) to advance) */ priv->napi_stop = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); bdx_enable_interrupts(priv); } return work_done; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b203143647e6..35a95dcc755b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -145,6 +145,7 @@ do { \ cpsw->data.active_slave) #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 +#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 static int debug_level; module_param(debug_level, int, 0); @@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE; module_param(rx_packet_max, int, 0); MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); +static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; +module_param(descs_pool_size, int, 0444); +MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); + struct cpsw_wr_regs { u32 id_ver; u32 soft_reset; @@ -352,7 +357,6 @@ struct cpsw_slave { struct phy_device *phy; struct net_device *ndev; u32 port_vlan; - u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -667,6 +671,18 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw) return; } +static int cpsw_get_usage_count(struct cpsw_common *cpsw) +{ + u32 i; + u32 usage_count = 0; + + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev && netif_running(cpsw->slaves[i].ndev)) + usage_count++; + + return usage_count; +} + static void cpsw_tx_handler(void *token, int len, int status) { struct netdev_queue *txq; @@ -699,18 +715,10 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { - bool ndev_status = false; - struct cpsw_slave *slave = cpsw->slaves; - int n; - - if (cpsw->data.dual_emac) { - /* In dual emac mode check for all interfaces */ - for (n = cpsw->data.slaves; n; n--, slave++) - if (netif_running(slave->ndev)) - ndev_status = true; - } - - if (ndev_status && (status >= 0)) { + /* In dual emac mode check for all interfaces */ + if (cpsw->data.dual_emac && + cpsw_get_usage_count(cpsw) && + (status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results @@ -934,7 +942,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) } if (num_rx < budget) { - napi_complete(napi_rx); + napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled = false; @@ -1230,21 +1238,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev, } } -static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) -{ - u32 i; - u32 usage_count = 0; - - if (!cpsw->data.dual_emac) - return 0; - - for (i = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].open_stat) - usage_count++; - - return usage_count; -} - static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, struct sk_buff *skb, struct cpdma_chan *txch) @@ -1478,8 +1471,6 @@ static int cpsw_ndo_open(struct net_device *ndev) return ret; } - if (!cpsw_common_res_usage_state(cpsw)) - cpsw_intr_disable(cpsw); netif_carrier_off(ndev); /* Notify the stack of the actual queue counts. 
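For context, "notify the stack of the actual queue counts" refers to the netif_set_real_num_{tx,rx}_queues() helpers. Roughly what such a call site looks like, as a sketch rather than the literal cpsw code:

    static int example_set_queue_counts(struct net_device *ndev,
                                        struct cpsw_common *cpsw)
    {
        int ret;

        /* Tell the core how many of the allocated queues are actually
         * backed by CPDMA channels, so it never schedules traffic on
         * a channel that was not created.
         */
        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
        if (ret)
            return ret;

        return netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
    }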
*/ @@ -1501,8 +1492,11 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); - /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(cpsw)) + /* Initialize host and slave ports. + * Given ndev is marked as opened already, so init port only if 1 ndev + * is opened + */ + if (cpsw_get_usage_count(cpsw) < 2) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); @@ -1513,7 +1507,10 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(cpsw)) { + /* Given ndev is marked as opened already, so if more ndev + * are opened - no need to init shared resources. + */ + if (cpsw_get_usage_count(cpsw) < 2) { /* disable priority elevation */ __raw_writel(0, &cpsw->regs->ptype); @@ -1556,9 +1553,6 @@ static int cpsw_ndo_open(struct net_device *ndev) cpdma_ctlr_start(cpsw->dma); cpsw_intr_enable(cpsw); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = true; - return 0; err_cleanup: @@ -1578,7 +1572,10 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(cpsw) <= 1) { + /* Given ndev is marked as close already, + * so disable shared resources if no open devices + */ + if (!cpsw_get_usage_count(cpsw)) { napi_disable(&cpsw->napi_rx); napi_disable(&cpsw->napi_tx); cpts_unregister(cpsw->cpts); @@ -1592,8 +1589,6 @@ static int cpsw_ndo_stop(struct net_device *ndev) cpsw_split_res(ndev); pm_runtime_put_sync(cpsw->dev); - if (cpsw->data.dual_emac) - cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -2363,17 +2358,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv, return 0; } -static int cpsw_set_channels(struct net_device *ndev, - struct ethtool_channels *chs) +static void cpsw_suspend_data_pass(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); struct cpsw_slave *slave; - int i, ret; - - ret = cpsw_check_ch_settings(cpsw, chs); - if (ret < 0) - return ret; + int i; /* Disable NAPI scheduling */ cpsw_intr_disable(cpsw); @@ -2391,6 +2380,51 @@ static int cpsw_set_channels(struct net_device *ndev, /* Handle rest of tx packets and stop cpdma channels */ cpdma_ctlr_stop(cpsw->dma); +} + +static int cpsw_resume_data_pass(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* Allow rx packets handling */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_dormant_off(slave->ndev); + + /* After this receive is started */ + if (cpsw_get_usage_count(cpsw)) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + return ret; + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_tx_start_all_queues(slave->ndev); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = 
cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + cpsw_suspend_data_pass(ndev); ret = cpsw_update_channels(priv, chs); if (ret) goto err; @@ -2413,30 +2447,14 @@ static int cpsw_set_channels(struct net_device *ndev, dev_err(priv->dev, "cannot set real number of rx queues\n"); goto err; } - - /* Enable rx packets handling */ - netif_dormant_off(slave->ndev); } - if (cpsw_common_res_usage_state(cpsw)) { - ret = cpsw_fill_rx_channels(priv); - if (ret) - goto err; - + if (cpsw_get_usage_count(cpsw)) cpsw_split_res(ndev); - /* After this receive is started */ - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } - - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - netif_tx_start_all_queues(slave->ndev); - } - return 0; + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; err: dev_err(priv->dev, "cannot update channels number, closing device\n"); dev_close(ndev); @@ -2479,6 +2497,52 @@ static int cpsw_nway_reset(struct net_device *ndev) return -EOPNOTSUPP; } +static void cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + /* not supported */ + ering->tx_max_pending = 0; + ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); + ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; + ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); +} + +static int cpsw_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + /* ignore ering->tx_pending - only rx_pending adjustment is supported */ + + if (ering->rx_mini_pending || ering->rx_jumbo_pending || + ering->rx_pending < CPSW_MAX_QUEUES || + ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES)) + return -EINVAL; + + if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) + return 0; + + cpsw_suspend_data_pass(ndev); + + cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); + + if (cpsw_get_usage_count(cpsw)) + cpdma_chan_split_pool(cpsw->dma); + + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; + + dev_err(&ndev->dev, "cannot set ring params, closing device\n"); + dev_close(ndev); + return ret; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -2505,6 +2569,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_eee = cpsw_get_eee, .set_eee = cpsw_set_eee, .nway_reset = cpsw_nway_reset, + .get_ringparam = cpsw_get_ringparam, + .set_ringparam = cpsw_set_ringparam, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, @@ -2969,6 +3035,7 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.has_ext_regs = true; dma_params.desc_hw_addr = dma_params.desc_mem_phys; dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; + dma_params.descs_pool_size = descs_pool_size; cpsw->dma = cpdma_ctlr_create(&dma_params); if (!cpsw->dma) { @@ -3072,9 +3139,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", - &ss_res->start, ndev->irq); - + cpsw_notice(priv, probe, + "initialized device (regs %pa, irq %d, pool size %d)\n", + &ss_res->start, ndev->irq, dma_params.descs_pool_size); if (cpsw->data.dual_emac) { ret = 
cpsw_probe_dual_emac(priv); if (ret) { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 43b061bd8e07..ddd43e09111e 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine * * Copyright (C) 2012 Texas Instruments * @@ -27,11 +27,14 @@ #define BITMASK(bits) (BIT(bits) - 1) -#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff) +#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask)) #define ALE_VERSION_MINOR(rev) (rev & 0xff) +#define ALE_VERSION_1R3 0x0103 +#define ALE_VERSION_1R4 0x0104 /* ALE Registers */ #define ALE_IDVER 0x00 +#define ALE_STATUS 0x04 #define ALE_CONTROL 0x08 #define ALE_PRESCALE 0x10 #define ALE_UNKNOWNVLAN 0x18 @@ -39,6 +42,13 @@ #define ALE_TABLE 0x34 #define ALE_PORTCTL 0x40 +/* ALE NetCP NU switch specific Registers */ +#define ALE_UNKNOWNVLAN_MEMBER 0x90 +#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94 +#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98 +#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C +#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg))) + #define ALE_TABLE_WRITE BIT(31) #define ALE_TYPE_FREE 0 @@ -51,6 +61,10 @@ #define ALE_UCAST_OUI 2 #define ALE_UCAST_TOUCHED 3 +#define ALE_TABLE_SIZE_MULTIPLIER 1024 +#define ALE_STATUS_SIZE_MASK 0x1f +#define ALE_TABLE_SIZE_DEFAULT 64 + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { int idx; @@ -84,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ cpsw_ale_set_field(ale_entry, start, bits, value); \ } +#define DEFINE_ALE_FIELD1(name, start) \ +static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \ +{ \ + return cpsw_ale_get_field(ale_entry, start, bits); \ +} \ +static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \ + u32 bits) \ +{ \ + cpsw_ale_set_field(ale_entry, start, bits, value); \ +} + DEFINE_ALE_FIELD(entry_type, 60, 2) DEFINE_ALE_FIELD(vlan_id, 48, 12) DEFINE_ALE_FIELD(mcast_state, 62, 2) -DEFINE_ALE_FIELD(port_mask, 66, 3) +DEFINE_ALE_FIELD1(port_mask, 66) DEFINE_ALE_FIELD(super, 65, 1) DEFINE_ALE_FIELD(ucast_type, 62, 2) -DEFINE_ALE_FIELD(port_num, 66, 2) +DEFINE_ALE_FIELD1(port_num, 66) DEFINE_ALE_FIELD(blocked, 65, 1) DEFINE_ALE_FIELD(secure, 64, 1) -DEFINE_ALE_FIELD(vlan_untag_force, 24, 3) -DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3) -DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3) -DEFINE_ALE_FIELD(vlan_member_list, 0, 3) +DEFINE_ALE_FIELD1(vlan_untag_force, 24) +DEFINE_ALE_FIELD1(vlan_reg_mcast, 16) +DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8) +DEFINE_ALE_FIELD1(vlan_member_list, 0) DEFINE_ALE_FIELD(mcast, 40, 1) +/* ALE NetCP nu switch specific */ +DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3) +DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3) /* The MAC address field in the ALE entry cannot be macroized as above */ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) @@ -223,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, { int mask; - mask = cpsw_ale_get_port_mask(ale_entry); + mask = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); if ((mask & port_mask) == 0) return; /* ports dont intersect, not interested */ mask &= ~port_mask; /* free if only remaining port is host port */ if (mask) - cpsw_ale_set_port_mask(ale_entry, mask); + cpsw_ale_set_port_mask(ale_entry, mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } @@ 
-291,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); - cpsw_ale_set_port_num(ale_entry, port); + cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits); idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) @@ -338,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); - mask = cpsw_ale_get_port_mask(ale_entry); + mask = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); port_mask |= mask; - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -367,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_port_mask(ale_entry, port_mask); + cpsw_ale_set_port_mask(ale_entry, port_mask, + ale->port_mask_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -376,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, } EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast); +/* ALE NetCP NU switch specific vlan functions */ +static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, + int reg_mcast, int unreg_mcast) +{ + int idx; + + /* Set VLAN registered multicast flood mask */ + idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry); + writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); + + /* Set VLAN unregistered multicast flood mask */ + idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry); + writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); +} + int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast) { @@ -389,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); - cpsw_ale_set_vlan_untag_force(ale_entry, untag); - cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); - cpsw_ale_set_vlan_member_list(ale_entry, port); + cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits); + if (!ale->params.nu_switch_ale) { + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, + ale->vlan_field_bits); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); + } else { + cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast); + } + cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -418,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + cpsw_ale_set_vlan_member_list(ale_entry, port_mask, + ale->vlan_field_bits); else cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); @@ -446,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) if (type != ALE_TYPE_VLAN) continue; - unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + unreg_mcast = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); if (allmulti) unreg_mcast |= 1; else unreg_mcast &= ~1; - 
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); cpsw_ale_write(ale, idx, ale_entry); } } @@ -464,7 +522,7 @@ struct ale_control_info { int bits; }; -static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { +static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { [ALE_ENABLE] = { .name = "enable", .offset = ALE_CONTROL, @@ -721,11 +779,83 @@ static void cpsw_ale_timer(unsigned long arg) void cpsw_ale_start(struct cpsw_ale *ale) { - u32 rev; + u32 rev, ale_entries; rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); - dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n", - ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev)); + if (!ale->params.major_ver_mask) + ale->params.major_ver_mask = 0xff; + ale->version = + (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) | + ALE_VERSION_MINOR(rev); + dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n", + ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask), + ALE_VERSION_MINOR(rev)); + + if (!ale->params.ale_entries) { + ale_entries = + __raw_readl(ale->params.ale_regs + ALE_STATUS) & + ALE_STATUS_SIZE_MASK; + /* ALE available on newer NetCP switches has introduced + * a register, ALE_STATUS, to indicate the size of ALE + * table which shows the size as a multiple of 1024 entries. + * For these, params.ale_entries will be set to zero. So + * read the register and update the value of ale_entries. + * ALE table on NetCP lite, is much smaller and is indicated + * by a value of zero in ALE_STATUS. So use a default value + * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected + * to set the value of ale_entries for all other versions + * of ALE. + */ + if (!ale_entries) + ale_entries = ALE_TABLE_SIZE_DEFAULT; + else + ale_entries *= ALE_TABLE_SIZE_MULTIPLIER; + ale->params.ale_entries = ale_entries; + } + dev_info(ale->params.dev, + "ALE Table size %ld\n", ale->params.ale_entries); + + /* set default bits for existing h/w */ + ale->port_mask_bits = 3; + ale->port_num_bits = 2; + ale->vlan_field_bits = 3; + + /* Set defaults override for ALE on NetCP NU switch and for version + * 1R3 + */ + if (ale->params.nu_switch_ale) { + /* Separate registers for unknown vlan configuration. 
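The ALE_STATUS-based sizing described in the comment above reduces to a few lines. A standalone sketch using the mask, multiplier, and default added earlier in this hunk:

    unsigned long ale_entries_from_status(unsigned int status)
    {
        unsigned long n = status & 0x1f;   /* ALE_STATUS_SIZE_MASK */

        /* Non-zero: table size in multiples of 1024 entries (NetCP
         * NU switch). Zero: NetCP lite, fall back to the 64-entry
         * default. Older ALEs never reach this because the caller
         * pre-sets ale_entries.
         */
        return n ? n * 1024 : 64;
    }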
+ * Also there are N bits, where N is the number of ALE + * ports, and the shift value should be 0 + */ + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset = + ALE_UNKNOWNVLAN_MEMBER; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0; + ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset = + ALE_UNKNOWNVLAN_REG_MCAST_FLOOD; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits = + ale->params.ale_ports; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0; + ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset = + ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = ale->params.ale_ports - 1; + ale->vlan_field_bits = ale->params.ale_ports; + } else if (ale->version == ALE_VERSION_1R3) { + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = 3; + ale->vlan_field_bits = ale->params.ale_ports; + } + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index a7001894f3da..25d24e8d0904 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -1,5 +1,5 @@ /* - * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs + * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs * * Copyright (C) 2012 Texas Instruments * @@ -21,6 +21,16 @@ struct cpsw_ale_params { unsigned long ale_ageout; /* in secs */ unsigned long ale_entries; unsigned long ale_ports; + /* The NU switch needs specific handling, as the number of bits in its + * ALE entries differs from other ALE versions, and it has dedicated + * registers for the unknown-vlan fields. So use nu_switch_ale + * to identify this hardware. + */ + bool nu_switch_ale; + /* The major version mask used in the NU switch ALE is 3 bits instead + * of 8, so the caller must pass it in. 
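For orientation, the version decode that consumes the new major_ver_mask (used in the cpsw_ale_start() hunk above) reduces to the sketch below; the macro bodies are inferred from their call sites in this diff, not copied from the header:

    #include <linux/types.h>

    #define ALE_VERSION_MAJOR(rev, mask)  (((rev) >> 8) & (mask))  /* assumed */
    #define ALE_VERSION_MINOR(rev)        ((rev) & 0xff)           /* assumed */

    static u32 ale_version_word(u32 rev, u32 major_ver_mask)
    {
            /* legacy callers leave the mask at 0 and get the full 8 bits */
            if (!major_ver_mask)
                    major_ver_mask = 0xff;

            return (ALE_VERSION_MAJOR(rev, major_ver_mask) << 8) |
                   ALE_VERSION_MINOR(rev);
    }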
+ */ + u32 major_ver_mask; }; struct cpsw_ale { @@ -28,6 +38,11 @@ struct cpsw_ale { struct timer_list timer; unsigned long ageout; int allmulti; + u32 version; + /* These bits are different on NetCP NU Switch ALE */ + u32 port_mask_bits; + u32 port_num_bits; + u32 vlan_field_bits; }; enum cpsw_ale_control { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 36518fc5c7cc..7ecc6b70e7e8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -108,6 +108,8 @@ struct cpdma_ctlr { spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; int chan_num; + int num_rx_desc; /* RX descriptors number */ + int num_tx_desc; /* TX descriptors number */ }; struct cpdma_chan { @@ -166,12 +168,12 @@ static struct cpdma_control_info controls[] = { #define num_chan params.num_chan /* various accessors */ -#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) -#define chan_read(chan, fld) __raw_readl((chan)->fld) -#define desc_read(desc, fld) __raw_readl(&(desc)->fld) -#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) -#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) -#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs)) +#define chan_read(chan, fld) readl((chan)->fld) +#define desc_read(desc, fld) readl(&(desc)->fld) +#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs)) +#define chan_write(chan, fld, v) writel(v, (chan)->fld) +#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld) #define cpdma_desc_to_port(chan, mode, directed) \ do { \ @@ -181,8 +183,10 @@ static struct cpdma_control_info controls[] = { (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) -static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) { + struct cpdma_desc_pool *pool = ctlr->pool; + if (!pool) return; @@ -191,10 +195,8 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) gen_pool_size(pool->gen_pool), gen_pool_avail(pool->gen_pool)); if (pool->cpumap) - dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, pool->phys); - else - iounmap(pool->iomap); } /* @@ -203,37 +205,50 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) * devices (e.g. cpsw switches) use plain old memory. 
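Before the davinci_cpdma hunks that follow, it may help to see the pool-sizing policy they implement in isolation: the descriptor count defaults to whatever fits in CPPI internal RAM, and a user-supplied descs_pool_size may push the pool out to DDR. A hypothetical standalone helper modelling that decision (names mirror the patch, the function itself is illustrative):

    #include <linux/types.h>

    struct pool_geometry {
            int num_desc;
            int mem_size;
            bool use_ddr;   /* true when the pool no longer fits in CPPI RAM */
    };

    static struct pool_geometry size_pool(int desc_size, int cppi_ram_size,
                                          int descs_pool_size)
    {
            struct pool_geometry g = {
                    .num_desc = cppi_ram_size / desc_size,
                    .mem_size = cppi_ram_size,
            };

            if (descs_pool_size) {
                    g.num_desc = descs_pool_size;
                    g.mem_size = desc_size * descs_pool_size;
                    /* the real code clears desc_mem_phys to switch to DDR */
                    g.use_ddr = g.mem_size > cppi_ram_size;
            }
            return g;
    }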
Descriptor pools * abstract out these details */ -static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, - int size, int align) +int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) { + struct cpdma_params *cpdma_params = &ctlr->params; struct cpdma_desc_pool *pool; - int ret; + int ret = -ENOMEM; - pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); + pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL); if (!pool) goto gen_pool_create_fail; + ctlr->pool = pool; + + pool->mem_size = cpdma_params->desc_mem_size; + pool->desc_size = ALIGN(sizeof(struct cpdma_desc), + cpdma_params->desc_align); + pool->num_desc = pool->mem_size / pool->desc_size; + + if (cpdma_params->descs_pool_size) { + /* Recalculate the memory size required for the cpdma descriptor + * pool based on the number of descriptors specified by the user; + * if that size exceeds the CPPI internal RAM size (desc_mem_size), + * switch to using DDR. + */ + pool->num_desc = cpdma_params->descs_pool_size; + pool->mem_size = pool->desc_size * pool->num_desc; + if (pool->mem_size > cpdma_params->desc_mem_size) + cpdma_params->desc_mem_phys = 0; + } - pool->dev = dev; - pool->mem_size = size; - pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); - pool->num_desc = size / pool->desc_size; - - pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1, - "cpdma"); + pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), + -1, "cpdma"); if (IS_ERR(pool->gen_pool)) { - dev_err(dev, "pool create failed %ld\n", - PTR_ERR(pool->gen_pool)); + ret = PTR_ERR(pool->gen_pool); + dev_err(ctlr->dev, "pool create failed %d\n", ret); goto gen_pool_create_fail; } - if (phys) { - pool->phys = phys; - pool->iomap = ioremap(phys, size); /* should be memremap? 
*/ - pool->hw_addr = hw_addr; + if (cpdma_params->desc_mem_phys) { + pool->phys = cpdma_params->desc_mem_phys; + pool->iomap = devm_ioremap(ctlr->dev, pool->phys, + pool->mem_size); + pool->hw_addr = cpdma_params->desc_hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, - GFP_KERNEL); + pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, + &pool->hw_addr, GFP_KERNEL); pool->iomap = (void __iomem __force *)pool->cpumap; pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } @@ -244,16 +259,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap, pool->phys, pool->mem_size, -1); if (ret < 0) { - dev_err(dev, "pool add failed %d\n", ret); + dev_err(ctlr->dev, "pool add failed %d\n", ret); goto gen_pool_add_virt_fail; } - return pool; + return 0; gen_pool_add_virt_fail: - cpdma_desc_pool_destroy(pool); + cpdma_desc_pool_destroy(ctlr); gen_pool_create_fail: - return NULL; + ctlr->pool = NULL; + return ret; } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -502,13 +518,11 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->chan_num = 0; spin_lock_init(&ctlr->lock); - ctlr->pool = cpdma_desc_pool_create(ctlr->dev, - ctlr->params.desc_mem_phys, - ctlr->params.desc_hw_addr, - ctlr->params.desc_mem_size, - ctlr->params.desc_align); - if (!ctlr->pool) + if (cpdma_desc_pool_create(ctlr)) return NULL; + /* split pool equally between RX/TX by default */ + ctlr->num_tx_desc = ctlr->pool->num_desc / 2; + ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr->num_chan = CPDMA_MAX_CHANNELS; @@ -542,10 +556,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } for (i = 0; i < ctlr->num_chan; i++) { - __raw_writel(0, ctlr->params.txhdp + 4 * i); - __raw_writel(0, ctlr->params.rxhdp + 4 * i); - __raw_writel(0, ctlr->params.txcp + 4 * i); - __raw_writel(0, ctlr->params.rxcp + 4 * i); + writel(0, ctlr->params.txhdp + 4 * i); + writel(0, ctlr->params.rxhdp + 4 * i); + writel(0, ctlr->params.txcp + 4 * i); + writel(0, ctlr->params.rxcp + 4 * i); } dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); @@ -623,7 +637,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) cpdma_chan_destroy(ctlr->channels[i]); - cpdma_desc_pool_destroy(ctlr->pool); + cpdma_desc_pool_destroy(ctlr); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); @@ -708,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, } } /* use remains */ - most_chan->desc_num += desc_cnt; + if (most_chan) + most_chan->desc_num += desc_cnt; } /** * cpdma_chan_split_pool - Splits ctrl pool between all channels. 
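The arithmetic inside cpdma_chan_split_pool() (its kerneldoc header appears just above, the body below) follows one rule per direction: explicitly weighted channels reserve their percentage of the direction's descriptor budget, and the remainder is shared evenly among unweighted channels. A reduced model of that rule, not the driver function itself:

    /* budget:     descriptors available to one direction (RX or TX)
     * weight_sum: sum of the explicit channel weights, in percent (<= 100)
     * free_chans: number of channels with no explicit weight
     */
    static int descs_per_unweighted_chan(int budget, int weight_sum,
                                         int free_chans)
    {
            int reserved = (weight_sum * budget) / 100;

            return free_chans ? (budget - reserved) / free_chans : 0;
    }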
* Has to be called under ctlr lock */ -static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) { int tx_per_ch_desc = 0, rx_per_ch_desc = 0; - struct cpdma_desc_pool *pool = ctlr->pool; int free_rx_num = 0, free_tx_num = 0; int rx_weight = 0, tx_weight = 0; int tx_desc_num, rx_desc_num; struct cpdma_chan *chan; - int i, tx_num = 0; + int i; if (!ctlr->chan_num) return 0; @@ -741,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) if (!chan->weight) free_tx_num++; tx_weight += chan->weight; - tx_num++; } } if (rx_weight > 100 || tx_weight > 100) return -EINVAL; - tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num; - rx_desc_num = pool->num_desc - tx_desc_num; + tx_desc_num = ctlr->num_tx_desc; + rx_desc_num = ctlr->num_rx_desc; if (free_tx_num) { tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100; @@ -765,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_split_pool); + /* cpdma_chan_set_weight - set weight of a channel in percentage. * Tx and Rx channels have separate weights. That is 100% for RX @@ -820,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate); */ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) { - struct cpdma_ctlr *ctlr = ch->ctlr; unsigned long flags, ch_flags; + struct cpdma_ctlr *ctlr; int ret, prio_mode; u32 rmask; @@ -831,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) if (ch->rate == rate) return rate; + ctlr = ch->ctlr; spin_lock_irqsave(&ctlr->lock, flags); spin_lock_irqsave(&ch->lock, ch_flags); @@ -898,7 +914,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan->chan_num = chan_num; chan->handler = handler; chan->rate = 0; - chan->desc_num = ctlr->pool->num_desc / 2; chan->weight = 0; if (is_rx_chan(chan)) { @@ -1061,13 +1076,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); - desc_write(desc, hw_next, 0); - desc_write(desc, hw_buffer, buffer); - desc_write(desc, hw_len, len); - desc_write(desc, hw_mode, mode | len); - desc_write(desc, sw_token, token); - desc_write(desc, sw_buffer, buffer); - desc_write(desc, sw_len, len); + /* Relaxed IO accessors can be used here as there is read barrier + * at the end of write sequence. 
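The descriptor-fill hunk below uses a common MMIO idiom: post each field with a relaxed write, then issue one ordered read-back so the DMA engine observes all of them before the channel is kicked. A compact illustration, with a stand-in layout for struct cpdma_desc:

    #include <linux/io.h>
    #include <linux/types.h>

    struct demo_desc {
            u32 hw_next, hw_buffer, hw_len, hw_mode;
            u32 sw_token, sw_buffer, sw_len;
    };

    static void demo_desc_fill(struct demo_desc __iomem *desc,
                               u32 buffer, u32 len, u32 mode)
    {
            writel_relaxed(0, &desc->hw_next);
            writel_relaxed(buffer, &desc->hw_buffer);
            writel_relaxed(len, &desc->hw_len);
            writel_relaxed(mode | len, &desc->hw_mode);
            writel_relaxed(len, &desc->sw_len);

            /* ordered read flushes the relaxed writes before any later MMIO */
            readl(&desc->sw_len);
    }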
+ */ + writel_relaxed(0, &desc->hw_next); + writel_relaxed(buffer, &desc->hw_buffer); + writel_relaxed(len, &desc->hw_len); + writel_relaxed(mode | len, &desc->hw_mode); + writel_relaxed(token, &desc->sw_token); + writel_relaxed(buffer, &desc->sw_buffer); + writel_relaxed(len, &desc->sw_len); + desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); @@ -1136,7 +1155,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } desc_dma = desc_phys(pool, desc); - status = __raw_readl(&desc->hw_mode); + status = desc_read(desc, hw_mode); outlen = status & 0x7ff; if (status & CPDMA_DESC_OWNER) { chan->stats.busy_dequeue++; @@ -1155,7 +1174,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) chan->count--; chan->stats.good_dequeue++; - if (status & CPDMA_DESC_EOQ) { + if ((status & CPDMA_DESC_EOQ) && chan->head) { chan->stats.requeue++; chan_write(chan, hdp, desc_phys(pool, chan->head)); } @@ -1316,4 +1335,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) } EXPORT_SYMBOL_GPL(cpdma_control_set); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs); + +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) +{ + return ctlr->num_tx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs); + +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) +{ + ctlr->num_rx_desc = num_rx_desc; + ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; +} +EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 4a167db2abab..fd65ce2b83de 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -37,6 +37,7 @@ struct cpdma_params { int desc_mem_size; int desc_align; u32 bus_freq_mhz; + u32 descs_pool_size; /* * Some instances of embedded cpdma controllers have extra control and @@ -113,5 +114,9 @@ enum cpdma_control { int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); +int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr); +void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc); +int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr); +int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr); #endif diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 481c7bf0395b..64d5527feb2a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget) &emac_rxhost_errcodes[cause][0], ch); } } else if (num_rx_pkts < budget) { - napi_complete(napi); + napi_complete_done(napi, num_rx_pkts); emac_int_enable(priv); } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 0f58c584ae09..8900a6fad318 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -23,6 +23,7 @@ #include <linux/netdevice.h> #include <linux/soc/ti/knav_dma.h> +#include <linux/u64_stats_sync.h> /* Maximum Ethernet frame size supported by Keystone switch */ #define NETCP_MAX_FRAME_SIZE 9504 @@ -68,6 +69,20 @@ struct netcp_addr { struct list_head node; }; +struct netcp_stats { + struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp; + u64 rx_packets; + u64 rx_bytes; + u32 rx_errors; + u32 rx_dropped; + + struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp; + u64 
tx_packets; + u64 tx_bytes; + u32 tx_errors; + u32 tx_dropped; +}; + struct netcp_intf { struct device *dev; struct device *ndev_dev; @@ -87,6 +102,11 @@ struct netcp_intf { void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; +#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0) + u32 hw_cap; + + /* 64-bit netcp stats */ + struct netcp_stats stats; void *rx_channel; const char *dma_chan_name; @@ -115,6 +135,7 @@ struct netcp_packet { struct sk_buff *skb; __le32 *epib; u32 *psdata; + u32 eflags; unsigned int psdata_len; struct netcp_intf *netcp; struct netcp_tx_pipe *tx_pipe; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c243335ed649..ebab1473f366 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc, *ndesc = le32_to_cpu(desc->next_desc); } +static void get_desc_info(u32 *desc_info, u32 *pkt_info, + struct knav_dma_desc *desc) +{ + *desc_info = le32_to_cpu(desc->desc_info); + *pkt_info = le32_to_cpu(desc->packet_info); +} + static u32 get_sw_data(int index, struct knav_dma_desc *desc) { /* No Endian conversion needed as this data is untouched by hw */ @@ -622,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, static void netcp_empty_rx_queue(struct netcp_intf *netcp) { + struct netcp_stats *rx_stats = &netcp->stats; struct knav_dma_desc *desc; unsigned int dma_sz; dma_addr_t dma; @@ -635,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp) if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", __func__); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; continue; } netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_dropped++; + rx_stats->rx_dropped++; } } static int netcp_process_one_rx_packet(struct netcp_intf *netcp) { + struct netcp_stats *rx_stats = &netcp->stats; unsigned int dma_sz, buf_len, org_buf_len; struct knav_dma_desc *desc, *ndesc; unsigned int pkt_sz = 0, accum_sz; @@ -653,6 +662,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) struct netcp_packet p_info; struct sk_buff *skb; void *org_buf_ptr; + u32 tmp; dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); if (!dma_desc) @@ -724,21 +734,27 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) knav_pool_desc_put(netcp->rx_pool, ndesc); } - /* Free the primary descriptor */ - knav_pool_desc_put(netcp->rx_pool, desc); - /* check for packet len and warn */ if (unlikely(pkt_sz != accum_sz)) dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", pkt_sz, accum_sz); - /* Remove ethernet FCS from the packet */ - __pskb_trim(skb, skb->len - ETH_FCS_LEN); + /* Newer version of the Ethernet switch can trim the Ethernet FCS + * from the packet and is indicated in hw_cap. 
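Pulled out of the hunk for readability, the capability check amounts to the following; ETH_SW_CAN_REMOVE_ETH_FCS is the flag this patch adds to netcp.h, while the helper itself is hypothetical:

    #include <linux/bitops.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    #define ETH_SW_CAN_REMOVE_ETH_FCS  BIT(0)  /* as added in netcp.h */

    static void demo_maybe_trim_fcs(struct sk_buff *skb, u32 hw_cap)
    {
            /* older switches leave the 4-byte FCS on the frame */
            if (!(hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
                    __pskb_trim(skb, skb->len - ETH_FCS_LEN);
    }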
So trim it only for + * older h/w + */ + if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS)) + __pskb_trim(skb, skb->len - ETH_FCS_LEN); /* Call each of the RX hooks */ p_info.skb = skb; skb->dev = netcp->ndev; p_info.rxtstamp_complete = false; + get_desc_info(&tmp, &p_info.eflags, desc); + p_info.epib = desc->epib; + p_info.psdata = (u32 __force *)desc->psdata; + p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) & + KNAV_DMA_DESC_EFLAGS_MASK); list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { int ret; @@ -747,14 +763,20 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) if (unlikely(ret)) { dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", rx_hook->order, ret); - netcp->ndev->stats.rx_errors++; + /* Free the primary descriptor */ + rx_stats->rx_dropped++; + knav_pool_desc_put(netcp->rx_pool, desc); dev_kfree_skb(skb); return 0; } } + /* Free the primary descriptor */ + knav_pool_desc_put(netcp->rx_pool, desc); - netcp->ndev->stats.rx_packets++; - netcp->ndev->stats.rx_bytes += skb->len; + u64_stats_update_begin(&rx_stats->syncp_rx); + rx_stats->rx_packets++; + rx_stats->rx_bytes += skb->len; + u64_stats_update_end(&rx_stats->syncp_rx); /* push skb up the stack */ skb->protocol = eth_type_trans(skb, netcp->ndev); @@ -763,7 +785,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp) free_desc: netcp_free_rx_desc_chain(netcp, desc); - netcp->ndev->stats.rx_errors++; + rx_stats->rx_errors++; return 0; } @@ -947,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) netcp_rxpool_refill(netcp); if (packets < budget) { - napi_complete(&netcp->rx_napi); + napi_complete_done(&netcp->rx_napi, packets); knav_queue_enable_notify(netcp->rx_queue); } @@ -994,6 +1016,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, unsigned int budget) { + struct netcp_stats *tx_stats = &netcp->stats; struct knav_dma_desc *desc; struct netcp_tx_cb *tx_cb; struct sk_buff *skb; @@ -1008,7 +1031,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); if (unlikely(!desc)) { dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1019,7 +1042,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netcp_free_tx_desc_chain(netcp, desc, dma_sz); if (!skb) { dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); - netcp->ndev->stats.tx_errors++; + tx_stats->tx_errors++; continue; } @@ -1036,8 +1059,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, netif_wake_subqueue(netcp->ndev, subqueue); } - netcp->ndev->stats.tx_packets++; - netcp->ndev->stats.tx_bytes += skb->len; + u64_stats_update_begin(&tx_stats->syncp_tx); + tx_stats->tx_packets++; + tx_stats->tx_bytes += skb->len; + u64_stats_update_end(&tx_stats->syncp_tx); dev_kfree_skb(skb); pkts++; } @@ -1212,9 +1237,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, /* psdata points to both native-endian and device-endian data */ __le32 *psdata = (void __force *)p_info.psdata; - memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, - p_info.psdata_len); - set_words(p_info.psdata, p_info.psdata_len, psdata); + set_words((u32 *)psdata + + (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len), + p_info.psdata_len, psdata); tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } @@ -1258,6 +1283,7 @@ out: 
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *tx_stats = &netcp->stats; int subqueue = skb_get_queue_mapping(skb); struct knav_dma_desc *desc; int desc_count, ret = 0; @@ -1273,7 +1299,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* If we get here, the skb has already been dropped */ dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", ret); - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; return ret; } skb->len = NETCP_MIN_PACKET_SIZE; @@ -1301,7 +1327,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; drop: - ndev->stats.tx_dropped++; + tx_stats->tx_dropped++; if (desc) netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); dev_kfree_skb(skb); @@ -1883,12 +1909,44 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return 0; } +static void +netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_stats *p = &netcp->stats; + u64 rxpackets, rxbytes, txpackets, txbytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_rx); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start)); + + do { + start = u64_stats_fetch_begin_irq(&p->syncp_tx); + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start)); + + stats->rx_packets = rxpackets; + stats->rx_bytes = rxbytes; + stats->tx_packets = txpackets; + stats->tx_bytes = txbytes; + + /* The following are stored as 32 bit */ + stats->rx_errors = p->rx_errors; + stats->rx_dropped = p->rx_dropped; + stats->tx_dropped = p->tx_dropped; +} + static const struct net_device_ops netcp_netdev_ops = { .ndo_open = netcp_ndo_open, .ndo_stop = netcp_ndo_stop, .ndo_start_xmit = netcp_ndo_start_xmit, .ndo_set_rx_mode = netcp_set_rx_mode, .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_get_stats64 = netcp_get_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = netcp_rx_add_vid, @@ -1935,6 +1993,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, INIT_LIST_HEAD(&netcp->txhook_list_head); INIT_LIST_HEAD(&netcp->rxhook_list_head); INIT_LIST_HEAD(&netcp->addr_list); + u64_stats_init(&netcp->stats.syncp_rx); + u64_stats_init(&netcp->stats.syncp_tx); netcp->netcp_device = netcp_device; netcp->dev = netcp_device->device; netcp->ndev = ndev; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 7d9e36f66735..f7bb241b17ab 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -81,7 +81,6 @@ #define GBENU_CPTS_OFFSET 0x1d000 #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 -#define GBENU_NUM_ALE_ENTRIES 1024 #define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ @@ -103,7 +102,7 @@ #define XGBE10_ALE_OFFSET 0x700 #define XGBE10_HW_STATS_OFFSET 0x800 #define XGBE10_HOST_PORT_NUM 0 -#define XGBE10_NUM_ALE_ENTRIES 1024 +#define XGBE10_NUM_ALE_ENTRIES 2048 #define GBE_TIMER_INTERVAL (HZ / 2) @@ -122,6 +121,7 @@ #define MACSL_FULLDUPLEX BIT(0) #define GBE_CTL_P0_ENABLE BIT(2) +#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13) #define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf #define GBE_STATS_CD_SEL BIT(28) @@ -2821,7 
+2821,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) struct netcp_intf *netcp = netdev_priv(ndev); struct gbe_slave *slave = gbe_intf->slave; int port_num = slave->port_num; - u32 reg; + u32 reg, val; int ret; reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); @@ -2851,7 +2851,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); /* Control register */ - writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); + val = GBE_CTL_P0_ENABLE; + if (IS_SS_ID_MU(gbe_dev)) { + val |= ETH_SW_CTL_P0_TX_CRC_REMOVE; + netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS; + } + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control)); /* All statistics enabled and STAT AB visible by default */ writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, @@ -2930,7 +2935,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, } slave->open = false; - slave->phy_node = of_parse_phandle(node, "phy-handle", 0); + if ((slave->link_interface == SGMII_LINK_MAC_PHY) || + (slave->link_interface == XGMII_LINK_MAC_PHY)) + slave->phy_node = of_parse_phandle(node, "phy-handle", 0); slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); if (slave->link_interface >= XGMII_LINK_MAC_PHY) @@ -3433,7 +3440,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; - gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ @@ -3601,7 +3607,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; ale_params.ale_entries = gbe_dev->ale_entries; ale_params.ale_ports = gbe_dev->ale_ports; - + if (IS_SS_ID_MU(gbe_dev)) { + ale_params.major_ver_mask = 0x7; + ale_params.nu_switch_ale = true; + } gbe_dev->ale = cpsw_ale_create(&ale_params); if (!gbe_dev->ale) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 2255f9a6f3bc..7c634bc75615 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info_mpipe->napi); + napi_complete_done(&info_mpipe->napi, work); md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. */ diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 0a3b7dafa3ba..49ccee4b9aec 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } } - napi_complete(&info->napi); + napi_complete_done(&info->napi, work); if (!priv->active) goto done; @@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * * Returns the address of the device statistics structure. 
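The tilepro hunk below makes the same ndo_get_stats64 conversion as netcp and via-rhine elsewhere in this diff: the handler now returns void and simply fills the caller's buffer, with u64_stats_sync guarding the 64-bit counters. A schematic handler, where demo_priv and its fields are placeholders:

    #include <linux/netdevice.h>
    #include <linux/u64_stats_sync.h>

    struct demo_priv {
            struct u64_stats_sync syncp_rx;
            u64 rx_packets, rx_bytes;
    };

    static void demo_get_stats64(struct net_device *ndev,
                                 struct rtnl_link_stats64 *stats)
    {
            struct demo_priv *p = netdev_priv(ndev);
            unsigned int start;

            do {    /* retry if a writer updated the counters meanwhile */
                    start = u64_stats_fetch_begin_irq(&p->syncp_rx);
                    stats->rx_packets = p->rx_packets;
                    stats->rx_bytes = p->rx_bytes;
            } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
    }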
*/ -static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void tile_net_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tile_net_priv *priv = netdev_priv(dev); u64 rx_packets = 0, tx_packets = 0; @@ -2090,12 +2090,8 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, stats->tx_bytes = tx_bytes; stats->rx_errors = rx_errors; stats->rx_dropped = rx_dropped; - - return stats; } - - /* * Change the Ethernet Address of the NIC. * diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 345316c749e7..72013314bba8 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget) } if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); gelic_card_rx_irq_on(card); } return packets_done; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cb341dfe65ad..cec9e70ab995 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ if (packets_done < budget) { - napi_complete(napi); + napi_complete_done(napi, packets_done); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 3be61ed28741..a45f98fa4aa7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) spin_unlock(&lp->rx_lock); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); /* enable interrupts */ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index f153ad729ce5..c5583991da4a 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget) if (num_received < budget) { data->rxpending = 0; - napi_complete(napi); + napi_complete_done(napi, num_received); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 0a6c4e804eed..c068c58428f7 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance); static void rhine_tx(struct net_device *dev); static int rhine_rx(struct net_device *dev, int limit); static void rhine_set_rx_mode(struct net_device *dev); -static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); +static void rhine_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static int rhine_close(struct net_device *dev); @@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget) } if (work_done < budget) { - 
napi_complete(napi); + napi_complete_done(napi, work_done); iowrite16(enable_mask, ioaddr + IntrEnable); mmiowb(); } @@ -2221,7 +2221,7 @@ out_unlock: mutex_unlock(&rp->task_lock); } -static struct rtnl_link_stats64 * +static void rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rhine_private *rp = netdev_priv(dev); @@ -2244,8 +2244,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); - - return stats; } static void rhine_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 4716e60e2ccb..d088788b27a7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget) velocity_tx_srv(vptr); /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_done); mac_enable_int(vptr->mac_regs); } spin_unlock_irqrestore(&vptr->lock, flags); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index e1296ef2cf66..f90267f0519f 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5100_enable_intr(priv); } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 724fabd38a23..56ae573001e8 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { - napi_complete(napi); + napi_complete_done(napi, rx_count); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 93dc10b10c09..e3070fd88bce 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1029,20 +1029,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) } /** - * xemaclite_remove_ndev - Free the network device - * @ndev: Pointer to the network device to be freed - * - * This function un maps the IO region of the Emaclite device and frees the net - * device. - */ -static void xemaclite_remove_ndev(struct net_device *ndev) -{ - if (ndev) { - free_netdev(ndev); - } -} - -/** * get_bool - Get a parameter from the OF device * @ofdev: Pointer to OF device structure * @s: Property to be retrieved @@ -1065,7 +1051,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s) } } -static struct net_device_ops xemaclite_netdev_ops; +static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. 
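The xemaclite hunks that follow are a small recurring cleanup: the ops table becomes const, and a matching const forward declaration lets the probe path reference it before its definition. The shape of it, with placeholder names:

    #include <linux/netdevice.h>

    static const struct net_device_ops demo_netdev_ops;  /* forward declaration */

    static int demo_setup(struct net_device *ndev)
    {
            ndev->netdev_ops = &demo_netdev_ops;  /* pointer-to-const is fine */
            return 0;
    }

    static const struct net_device_ops demo_netdev_ops = {
            .ndo_open = NULL,  /* real handlers would go here */
    };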
@@ -1172,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) return 0; error: - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return rc; } @@ -1204,7 +1190,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - xemaclite_remove_ndev(ndev); + free_netdev(ndev); return 0; } @@ -1219,7 +1205,7 @@ xemaclite_poll_controller(struct net_device *ndev) } #endif -static struct net_device_ops xemaclite_netdev_ops = { +static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c index e395ace3120b..648ff9fdb909 100644 --- a/drivers/net/fddi/skfp/cfm.c +++ b/drivers/net/fddi/skfp/cfm.c @@ -52,7 +52,6 @@ static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ; #define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG) #define ACTIONS(x) (x|AFLAG) -#ifdef DEBUG /* * symbolic state names */ @@ -68,7 +67,6 @@ static const char * const cfm_states[] = { static const char * const cfm_events[] = { "NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B" } ; -#endif /* * map from state to downstream port type @@ -230,10 +228,10 @@ void cfm(struct s_smc *smc, int event) oldstate = smc->mib.fddiSMTCF_State ; do { - DB_CFM("CFM : state %s%s", - (smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "", - cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ; - DB_CFM(" event %s\n",cfm_events[event],0) ; + DB_CFM("CFM : state %s%s event %s", + smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "", + cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG], + cfm_events[event]); state = smc->mib.fddiSMTCF_State ; cfm_fsm(smc,event) ; event = 0 ; @@ -297,7 +295,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ /* Don't do the WC-Flag changing here */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break; case SC0_ISOLATED : /*SC07*/ @@ -338,7 +336,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC9_C_WRAP_A : /*SC10*/ @@ -403,7 +401,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC10_C_WRAP_B : /*SC20*/ @@ -448,7 +446,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) smc->r.rm_join = TRUE ; queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC4_THRU_A : /*SC41*/ @@ -481,7 +479,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) smc->r.rm_join = TRUE ; queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ ACTIONS_DONE() ; - DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC5_THRU_B : /*SC51*/ @@ -519,7 +517,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd) queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */ } ACTIONS_DONE() ; - DB_CFMN(1,"CFM 
: %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ; + DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]); break ; case SC11_C_WRAP_S : /*SC70*/ diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c index 07da97c303d6..fed3a92d3df4 100644 --- a/drivers/net/fddi/skfp/drvfbi.c +++ b/drivers/net/fddi/skfp/drvfbi.c @@ -343,8 +343,8 @@ void init_board(struct s_smc *smc, u_char *mac_addr) */ void sm_pm_bypass_req(struct s_smc *smc, int mode) { - DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ? - "BP_INSERT" : "BP_DEINSERT",0) ; + DB_ECMN(1, "ECM : sm_pm_bypass_req(%s)", + mode == BP_INSERT ? "BP_INSERT" : "BP_DEINSERT"); if (smc->s.sas != SMT_DAS) return ; diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c index 47d922cb3c08..eee9ba91346a 100644 --- a/drivers/net/fddi/skfp/ecm.c +++ b/drivers/net/fddi/skfp/ecm.c @@ -66,7 +66,6 @@ static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ; #define EC6_CHECK 6 /* checking bypass */ #define EC7_DEINSERT 7 /* bypass being turnde off */ -#ifdef DEBUG /* * symbolic state names */ @@ -83,7 +82,6 @@ static const char * const ecm_events[] = { "EC_TIMEOUT_TD","EC_TIMEOUT_TMAX", "EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE" } ; -#endif /* * all Globals are defined in smc.h @@ -126,10 +124,10 @@ void ecm(struct s_smc *smc, int event) int state ; do { - DB_ECM("ECM : state %s%s", - (smc->mib.fddiSMTECMState & AFLAG) ? "ACTIONS " : "", - ecm_states[smc->mib.fddiSMTECMState & ~AFLAG]) ; - DB_ECM(" event %s\n",ecm_events[event],0) ; + DB_ECM("ECM : state %s%s event %s", + smc->mib.fddiSMTECMState & AFLAG ? "ACTIONS " : "", + ecm_states[smc->mib.fddiSMTECMState & ~AFLAG], + ecm_events[event]); state = smc->mib.fddiSMTECMState ; ecm_fsm(smc,event) ; event = 0 ; @@ -379,7 +377,7 @@ static void ecm_fsm(struct s_smc *smc, int cmd) (((ls_a == PC_ILS) && (ls_b == PC_QLS)) || ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){ smc->e.sb_flag = TRUE ; - DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ; + DB_ECMN(1, "ECM : EC6_CHECK - stuck bypass"); AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long) FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK, smt_get_error_word(smc)); @@ -443,29 +441,29 @@ static void prop_actions(struct s_smc *smc) return ; } - DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ; - DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ; + DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop); + DB_ECM("ECM : prop_actions - in %d out %d", port_in, port_out); if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) { /* trace initiatior */ - DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ; + DB_ECM("ECM : initiate TRACE on PHY %c", 'A' + port_in - PA); queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) && port_out != PA) { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ; + DB_ECM("ECM : propagate TRACE on PHY B"); queue_event(smc,EVENT_PCMB,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) && port_out != PB) { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ; + DB_ECM("ECM : propagate TRACE on PHY A"); queue_event(smc,EVENT_PCMA,PC_TRACE) ; } else { /* signal trace termination */ - DB_ECM("ECM : TRACE terminated\n",0,0) ; + DB_ECM("ECM : TRACE terminated"); smc->e.path_test = PT_PENDING ; } smc->e.trace_prop = 0 ; @@ -482,13 +480,13 @@ static void prop_actions(struct s_smc *smc) RS_SET(smc,RS_EVENT) ; while 
(smc->e.trace_prop) { - DB_ECM("ECM : prop_actions - trace_prop %d\n", - smc->e.trace_prop,0) ; + DB_ECM("ECM : prop_actions - trace_prop %d", + smc->e.trace_prop); if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) { initiator = ENTITY_MAC ; smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ; - DB_ECM("ECM: MAC initiates trace\n",0,0) ; + DB_ECM("ECM: MAC initiates trace"); } else { for (p = NUMPHYS-1 ; p >= 0 ; p--) { @@ -503,12 +501,12 @@ static void prop_actions(struct s_smc *smc) if (upstream == ENTITY_MAC) { /* signal trace termination */ - DB_ECM("ECM : TRACE terminated\n",0,0) ; + DB_ECM("ECM : TRACE terminated"); smc->e.path_test = PT_PENDING ; } else { /* trace propagate upstream */ - DB_ECM("ECM : propagate TRACE on PHY %d\n",upstream,0) ; + DB_ECM("ECM : propagate TRACE on PHY %d", upstream); queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ; } } diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c index 2fc5987b41dc..325e2c525e35 100644 --- a/drivers/net/fddi/skfp/ess.c +++ b/drivers/net/fddi/skfp/ess.c @@ -134,7 +134,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * get the resource type */ if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) { - DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ; + DB_ESS("ESS: RAF frame error, parameter type not found"); return fs; } msg_res_type = ((struct smt_p_0015 *)p)->res_type ; @@ -146,16 +146,16 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, /* * error in frame: para ESS command was not found */ - DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0); + DB_ESS("ESS: RAF frame error, parameter command not found"); return fs; } - DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ; - DB_ESSN(2,"ver %x tran %lx\n",sm->smt_version,sm->smt_tid) ; - DB_ESSN(2,"stn_id %s\n",addr_to_string(&sm->smt_source),0) ; + DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type); + DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid); + DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source)); - DB_ESSN(2,"infolen %x res %x\n",sm->smt_len, msg_res_type) ; - DB_ESSN(2,"sbacmd %x\n",cmd->sba_cmd,0) ; + DB_ESSN(2, "infolen %x res %lx", sm->smt_len, msg_res_type); + DB_ESSN(2, "sbacmd %x", cmd->sba_cmd); /* * evaluate the ESS command @@ -189,7 +189,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * The ESS do not send the Frame to the network! 
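All of these DB_* conversions lean on the macro rework in cmtdef.h further below: the varargs form lets the non-debug branch expand to no_printk(), so the compiler keeps type-checking every format string even when the output is compiled out, which is how mismatches like a u32 printed with %lx get caught. A minimal standalone version, assuming kernel printk rather than skfp's own printf:

    #include <linux/printk.h>

    #ifdef DEBUG
    #define DB_DEMO(flag, fmt, ...) \
            do { if (flag) printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); } while (0)
    #else
    #define DB_DEMO(flag, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
    #endif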
*/ smc->ess.alloc_trans_id = sm->smt_tid ; - DB_ESS("ESS: save Alloc Req Trans ID %lx\n",sm->smt_tid,0); + DB_ESS("ESS: save Alloc Req Trans ID %x", sm->smt_tid); p = (void *) sm_to_para(smc,sm,SMT_P320F) ; ((struct smt_p_320f *)p)->mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ; @@ -220,7 +220,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * check the parameters */ if (smt_check_para(smc,sm,plist_raf_alc_res)) { - DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF with para problem, ignoring"); return fs; } @@ -241,7 +241,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, != SMT_RDF_SUCCESS) || (sm->smt_tid != smc->ess.alloc_trans_id)) { - DB_ESS("ESS: Allocation Response not accepted\n",0,0) ; + DB_ESS("ESS: Allocation Response not accepted"); return fs; } @@ -261,7 +261,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, } overhead = ((struct smt_p_3210 *)p)->mib_overhead ; - DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ; + DB_ESSN(2, "payload= %lx overhead= %lx", + payload, overhead); /* * process the bandwidth allocation @@ -279,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * except only replies */ if (sm->smt_type != SMT_REQUEST) { - DB_ESS("ESS: Do not process Change Responses\n",0,0) ; + DB_ESS("ESS: Do not process Change Responses"); return fs; } @@ -287,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * check the para for the Change Request */ if (smt_check_para(smc,sm,plist_raf_chg_req)) { - DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF with para problem, ignoring"); return fs; } @@ -299,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, */ if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index != PRIMARY_RING) || (msg_res_type != SYNC_BW)) { - DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ; + DB_ESS("ESS: RAF frame with para problem, ignoring"); return fs; } @@ -311,9 +312,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, p = (void *) sm_to_para(smc,sm,SMT_P3210) ; overhead = ((struct smt_p_3210 *)p)->mib_overhead ; - DB_ESSN(2,"ESS: Change Request from %s\n", - addr_to_string(&sm->smt_source),0) ; - DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ; + DB_ESSN(2, "ESS: Change Request from %s", + addr_to_string(&sm->smt_source)); + DB_ESSN(2, "payload= %lx overhead= %lx", + payload, overhead); /* * process the bandwidth allocation @@ -337,18 +339,18 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, * except only requests */ if (sm->smt_type != SMT_REQUEST) { - DB_ESS("ESS: Do not process a Report Reply\n",0,0) ; + DB_ESS("ESS: Do not process a Report Reply"); return fs; } - DB_ESSN(2,"ESS: Report Request from %s\n", - addr_to_string(&(sm->smt_source)),0) ; + DB_ESSN(2, "ESS: Report Request from %s", + addr_to_string(&sm->smt_source)); /* * verify that the resource type is sync bw only */ if (msg_res_type != SYNC_BW) { - DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ; + DB_ESS("ESS: ignoring RAF with para problem"); return fs; } @@ -364,7 +366,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, /* * error in frame */ - DB_ESS("ESS: ignoring RAF with bad sba_cmd\n",0,0) ; + DB_ESS("ESS: ignoring RAF with bad sba_cmd"); break ; } @@ -417,17 +419,17 
@@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe * set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload */ /* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) { - DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ; + DB_ESS("ESS: SMT does not accept the payload value"); return FALSE; } if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) { - DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ; + DB_ESS("ESS: SMT does not accept the overhead value"); return FALSE; } */ /* premliminary */ if (payload > MAX_PAYLOAD || overhead > 5000) { - DB_ESS("ESS: payload / overhead not accepted\n",0,0) ; + DB_ESS("ESS: payload / overhead not accepted"); return FALSE; } @@ -446,7 +448,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe * evulate the Payload */ if (payload) { - DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit on\n",0,0) ; + DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit on"); smc->ess.sync_bw_available = TRUE ; smc->ess.sync_bw = overhead - @@ -454,7 +456,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe payload / 1562 ; } else { - DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit off\n",0,0) ; + DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit off"); smc->ess.sync_bw_available = FALSE ; smc->ess.sync_bw = 0 ; overhead = 0 ; @@ -464,7 +466,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ; - DB_ESSN(2,"tsync = %lx\n",smc->ess.sync_bw,0) ; + DB_ESSN(2, "tsync = %lx", smc->ess.sync_bw); ess_config_fifo(smc) ; set_formac_tsync(smc,smc->ess.sync_bw) ; @@ -541,7 +543,7 @@ void ess_timer_poll(struct s_smc *smc) if (!smc->ess.raf_act_timer_poll) return ; - DB_ESSN(2,"ESS: timer_poll\n",0,0) ; + DB_ESSN(2, "ESS: timer_poll"); smc->ess.timer_count++ ; if (smc->ess.timer_count == 10) { @@ -667,11 +669,11 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb) /* * Send the Change Reply to the local SBA */ - DB_ESS("ESS:Send to the local SBA\n",0,0) ; + DB_ESS("ESS:Send to the local SBA"); if (!smc->ess.sba_reply_pend) smc->ess.sba_reply_pend = mb ; else { - DB_ESS("Frame is lost - another frame was pending\n",0,0); + DB_ESS("Frame is lost - another frame was pending"); smt_free_mbuf(smc,mb) ; } } @@ -679,7 +681,7 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb) /* * Send the SBA RAF Change Reply to the network */ - DB_ESS("ESS:Send to the network\n",0,0) ; + DB_ESS("ESS:Send to the network"); smt_send_frame(smc,mb,FC_SMT_INFO,0) ; } } diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c index 7d3779ae7377..24aed28b982c 100644 --- a/drivers/net/fddi/skfp/fplustm.c +++ b/drivers/net/fddi/skfp/fplustm.c @@ -726,7 +726,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l) if (code_s2u & FM_SMYBEC) queue_event(smc,EVENT_RMT,RM_MY_BEACON) ; if (change_s2u & code_s2u & FM_SLOCLM) { - DB_RMTN(2,"RMT : lower claim received\n",0,0) ; + DB_RMTN(2, "RMT : lower claim received"); } if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) { /* @@ -746,7 +746,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l) queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ; } if (change_s2u & code_s2u & FM_SHICLM) { - DB_RMTN(2,"RMT : higher claim received\n",0,0) ; + DB_RMTN(2, "RMT : higher claim received"); } if ( (code_s2l & FM_STRTEXP) || (code_s2l & FM_STRTEXR) ) @@ -1334,7 +1334,7 @@ void rtm_irq(struct s_smc *smc) outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; 
 	/* clear IRQ */
 	if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
 		outpw(FM_A(FM_CMDREG1),FM_ICL) ;	/* force claim */
-		DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ;
+		DB_RMT("RMT: fddiPATHT_Rmode expired");
 		AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
 				(u_long) FDDI_SMT_EVENT,
 				(u_long) FDDI_RTT, smt_get_event_word(smc));
@@ -1353,8 +1353,8 @@ void rtm_set_timer(struct s_smc *smc)
 	/*
 	 * MIB timer and hardware timer have the same resolution of 80nS
 	 */
-	DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
-		(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
+	DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns",
+	       (int)smc->mib.a[PATH0].fddiPATHT_Rmode);
 	outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
 }
 
@@ -1469,13 +1469,13 @@ static void smt_split_up_fifo(struct s_smc *smc)
 	smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
 		smc->hw.fp.fifo.tx_a0_size ;
-	DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ;
-	DB_SMT("rbc_ram_start = %x rbc_ram_end = %x\n",
-		smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ;
-	DB_SMT("rx1_fifo_start = %x tx_s_start = %x\n",
-		smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ;
-	DB_SMT("tx_a0_start = %x rx2_fifo_start = %x\n",
-		smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ;
+	DB_SMT("FIFO split: mode = %x", smc->hw.fp.fifo.fifo_config_mode);
+	DB_SMT("rbc_ram_start = %x rbc_ram_end = %x",
+	       smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end);
+	DB_SMT("rx1_fifo_start = %x tx_s_start = %x",
+	       smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start);
+	DB_SMT("tx_a0_start = %x rx2_fifo_start = %x",
+	       smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start);
 }
 
 void formac_reinit_tx(struct s_smc *smc)
diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index f5bc90ff2a2a..5d6891154367 100644
--- a/drivers/net/fddi/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -54,43 +54,48 @@
 #endif
 
 #ifdef	DEBUG
-#define	DB_PR(flag,a,b,c)	{ if (flag) printf(a,b,c) ; }
+#define DB_PR(flag, fmt, ...) \
+	do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0)
 #else
-#define	DB_PR(flag,a,b,c)
+#define DB_PR(flag, fmt, ...)	no_printk(fmt "\n", ##__VA_ARGS__)
+
 #endif
 
 #ifdef DEBUG_BRD
-#define	DB_ECM(a,b,c)		DB_PR((smc->debug.d_smt&1),a,b,c)
-#define	DB_ECMN(n,a,b,c)	DB_PR((smc->debug.d_ecm >=(n)),a,b,c)
-#define	DB_RMT(a,b,c)		DB_PR((smc->debug.d_smt&2),a,b,c)
-#define	DB_RMTN(n,a,b,c)	DB_PR((smc->debug.d_rmt >=(n)),a,b,c)
-#define	DB_CFM(a,b,c)		DB_PR((smc->debug.d_smt&4),a,b,c)
-#define	DB_CFMN(n,a,b,c)	DB_PR((smc->debug.d_cfm >=(n)),a,b,c)
-#define	DB_PCM(a,b,c)		DB_PR((smc->debug.d_smt&8),a,b,c)
-#define	DB_PCMN(n,a,b,c)	DB_PR((smc->debug.d_pcm >=(n)),a,b,c)
-#define	DB_SMT(a,b,c)		DB_PR((smc->debug.d_smtf),a,b,c)
-#define	DB_SMTN(n,a,b,c)	DB_PR((smc->debug.d_smtf >=(n)),a,b,c)
-#define	DB_SBA(a,b,c)		DB_PR((smc->debug.d_sba),a,b,c)
-#define	DB_SBAN(n,a,b,c)	DB_PR((smc->debug.d_sba >=(n)),a,b,c)
-#define	DB_ESS(a,b,c)		DB_PR((smc->debug.d_ess),a,b,c)
-#define	DB_ESSN(n,a,b,c)	DB_PR((smc->debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (smc->debug)
 #else
-#define	DB_ECM(a,b,c)		DB_PR((debug.d_smt&1),a,b,c)
-#define	DB_ECMN(n,a,b,c)	DB_PR((debug.d_ecm >=(n)),a,b,c)
-#define	DB_RMT(a,b,c)		DB_PR((debug.d_smt&2),a,b,c)
-#define	DB_RMTN(n,a,b,c)	DB_PR((debug.d_rmt >=(n)),a,b,c)
-#define	DB_CFM(a,b,c)		DB_PR((debug.d_smt&4),a,b,c)
-#define	DB_CFMN(n,a,b,c)	DB_PR((debug.d_cfm >=(n)),a,b,c)
-#define	DB_PCM(a,b,c)		DB_PR((debug.d_smt&8),a,b,c)
-#define	DB_PCMN(n,a,b,c)	DB_PR((debug.d_pcm >=(n)),a,b,c)
-#define	DB_SMT(a,b,c)		DB_PR((debug.d_smtf),a,b,c)
-#define	DB_SMTN(n,a,b,c)	DB_PR((debug.d_smtf >=(n)),a,b,c)
-#define	DB_SBA(a,b,c)		DB_PR((debug.d_sba),a,b,c)
-#define	DB_SBAN(n,a,b,c)	DB_PR((debug.d_sba >=(n)),a,b,c)
-#define	DB_ESS(a,b,c)		DB_PR((debug.d_ess),a,b,c)
-#define	DB_ESSN(n,a,b,c)	DB_PR((debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (debug)
 #endif
 
+#define DB_ECM(fmt, ...) \
+	DB_PR((DB_TEST).d_smt & 1, fmt, ##__VA_ARGS__)
+#define DB_ECMN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_ecm >= (n), fmt, ##__VA_ARGS__)
+#define DB_RMT(fmt, ...) \
+	DB_PR((DB_TEST).d_smt & 2, fmt, ##__VA_ARGS__)
+#define DB_RMTN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_rmt >= (n), fmt, ##__VA_ARGS__)
+#define DB_CFM(fmt, ...) \
+	DB_PR((DB_TEST).d_smt & 4, fmt, ##__VA_ARGS__)
+#define DB_CFMN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_cfm >= (n), fmt, ##__VA_ARGS__)
+#define DB_PCM(fmt, ...) \
+	DB_PR((DB_TEST).d_smt & 8, fmt, ##__VA_ARGS__)
+#define DB_PCMN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_pcm >= (n), fmt, ##__VA_ARGS__)
+#define DB_SMT(fmt, ...) \
+	DB_PR((DB_TEST).d_smtf, fmt, ##__VA_ARGS__)
+#define DB_SMTN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_smtf >= (n), fmt, ##__VA_ARGS__)
+#define DB_SBA(fmt, ...) \
+	DB_PR((DB_TEST).d_sba, fmt, ##__VA_ARGS__)
+#define DB_SBAN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_sba >= (n), fmt, ##__VA_ARGS__)
+#define DB_ESS(fmt, ...) \
+	DB_PR((DB_TEST).d_ess, fmt, ##__VA_ARGS__)
+#define DB_ESSN(n, fmt, ...) \
+	DB_PR((DB_TEST).d_ess >= (n), fmt, ##__VA_ARGS__)
+
 #ifndef	SS_NOT_DS
 #define	SK_LOC_DECL(type,var)	type var
 #else
@@ -640,8 +645,8 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text);
 #define	dump_smt(smc,sm,text)
 #endif
 
-#ifdef	DEBUG
 char* addr_to_string(struct fddi_addr *addr);
+#ifdef	DEBUG
 void dump_hex(char *p, int len);
 #endif
 
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 4ca2341d7f06..123cfa09c354 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -168,13 +168,25 @@ struct os_debug {
 #define	DB_P	debug
 #endif
 
-#define DB_RX(a,b,c,lev) if (DB_P.d_os.hwm_rx >= (lev))	printf(a,b,c)
-#define DB_TX(a,b,c,lev) if (DB_P.d_os.hwm_tx >= (lev))	printf(a,b,c)
-#define DB_GEN(a,b,c,lev) if (DB_P.d_os.hwm_gen >= (lev)) printf(a,b,c)
+#define DB_RX(lev, fmt, ...)						\
+do {									\
+	if (DB_P.d_os.hwm_rx >= (lev))					\
+		printf(fmt "\n", ##__VA_ARGS__);			\
+} while (0)
+#define DB_TX(lev, fmt, ...)						\
+do {									\
+	if (DB_P.d_os.hwm_tx >= (lev))					\
+		printf(fmt "\n", ##__VA_ARGS__);			\
+} while (0)
+#define DB_GEN(lev, fmt, ...)						\
+do {									\
+	if (DB_P.d_os.hwm_gen >= (lev))					\
+		printf(fmt "\n", ##__VA_ARGS__);			\
+} while (0)
 #else	/* DEBUG */
-#define DB_RX(a,b,c,lev)
-#define DB_TX(a,b,c,lev)
-#define DB_GEN(a,b,c,lev)
+#define DB_RX(lev, fmt, ...)	no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_TX(lev, fmt, ...)	no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_GEN(lev, fmt, ...)	no_printk(fmt "\n", ##__VA_ARGS__)
 #endif	/* DEBUG */
 
 #ifndef	SK_BREAK
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index d0a68bdd5f63..abbe309051d9 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -158,7 +158,7 @@ u_int mac_drv_check_space(void);
 SMbuf* smt_get_mbuf(struct s_smc *smc);
 
 #ifdef	DEBUG
-	void mac_drv_debug_lev(void);
+	void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
 #endif
 
 /*
@@ -330,7 +330,7 @@ static u_long init_descr_ring(struct s_smc *smc,
 	union s_fp_descr volatile *d2 ;
 	u_long	phys ;
 
-	DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
+	DB_GEN(3, "descr ring starts at = %p", start);
 	for (i=count-1, d1=start; i ; i--) {
 		d2 = d1 ;
 		d1++ ;		/* descr is owned by the host */
@@ -339,7 +339,7 @@ static u_long init_descr_ring(struct s_smc *smc,
 		phys = mac_drv_virt2phys(smc,(void *)d1) ;
 		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
 	}
-	DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
+	DB_GEN(3, "descr ring ends at = %p", d1);
 	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
 	d1->r.rxd_next = &start->r ;
 	phys = mac_drv_virt2phys(smc,(void *)start) ;
@@ -364,7 +364,7 @@ static void init_txd_ring(struct s_smc *smc)
 	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
 		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
 	queue = smc->hw.fp.tx[QUEUE_A0] ;
-	DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
+	DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_ASYNC_TXD_COUNT) ;
 	phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -378,7 +378,7 @@ static void init_txd_ring(struct s_smc *smc)
 	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
 		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
 	queue = smc->hw.fp.tx[QUEUE_S] ;
-	DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
+	DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_SYNC_TXD_COUNT) ;
 	phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -400,7 +400,7
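The cmtdef.h and hwmtm.h hunks above convert the fixed three-argument DB_* helpers into variadic macros that append the trailing newline themselves and fall back to no_printk() so format arguments stay type-checked even in non-debug builds. A minimal user-space sketch of the same pattern; the macro and variable names here are illustrative, not the driver's:

#include <stdio.h>

/* Swallow the output but keep compile-time format checking,
 * similar in spirit to the kernel's no_printk(). */
#define my_no_printk(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

#ifdef DEBUG
#define MY_DB(flag, fmt, ...) \
	do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0)
#else
#define MY_DB(flag, fmt, ...)	my_no_printk(fmt "\n", ##__VA_ARGS__)
#endif

int main(void)
{
	int d_rmt = 2;

	/* The GNU ##__VA_ARGS__ extension drops the comma when no
	 * arguments follow, so zero-argument calls need none of the
	 * dummy ",0,0" padding the old macros required. */
	MY_DB(d_rmt >= 1, "RMT : RING UP");
	MY_DB(d_rmt >= 2, "frame length = %d", 42);
	return 0;
}

Built with -DDEBUG the messages print; without it they compile away, but a mismatched format string is still rejected at build time.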
@@ static void init_rxd_ring(struct s_smc *smc) */ ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ; queue = smc->hw.fp.rx[QUEUE_R1] ; - DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; + DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT); (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, SMT_R1_RXD_COUNT) ; phys = le32_to_cpu(ds->rxd_nrdadr) ; @@ -469,11 +469,11 @@ void init_fddi_driver(struct s_smc *smc, u_char *mac_addr) */ i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ; if (i != 16) { - DB_GEN("i = %d",i,0,3) ; + DB_GEN(3, "i = %d", i); smc->os.hwm.descr_p = (union s_fp_descr volatile *) ((char *)smc->os.hwm.descr_p+i) ; } - DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ; + DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p); init_txd_ring(smc) ; init_rxd_ring(smc) ; @@ -501,7 +501,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc) mb->sm_off = 8 ; mb->sm_use_count = 1 ; } - DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ; + DB_GEN(3, "get SMbuf: mb = %p", mb); return mb; /* May be NULL */ } @@ -510,14 +510,14 @@ void smt_free_mbuf(struct s_smc *smc, SMbuf *mb) if (mb) { mb->sm_use_count-- ; - DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ; + DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count); /* * If the use_count is != zero the MBuf is queued * more than once and must not queued into the * free MBuf queue */ if (!mb->sm_use_count) { - DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ; + DB_GEN(3, "free SMbuf: mb = %p", mb); #ifndef COMMON_MB_POOL mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ; smc->os.hwm.mbuf_pool.mb_free = mb ; @@ -741,7 +741,7 @@ void fddi_isr(struct s_smc *smc) while ((is = GET_ISR() & ISR_MASK)) { NDD_TRACE("CH0B",is,0,0) ; - DB_GEN("ISA = 0x%x",is,0,7) ; + DB_GEN(7, "ISA = 0x%lx", is); if (is & IMASK_SLOW) { NDD_TRACE("CH1b",is,0,0) ; @@ -754,20 +754,20 @@ void fddi_isr(struct s_smc *smc) if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */ stu = inpw(FM_A(FM_ST1U)) ; stl = inpw(FM_A(FM_ST1L)) ; - DB_GEN("Slow transmit complete",0,0,6) ; + DB_GEN(6, "Slow transmit complete"); mac1_irq(smc,stu,stl) ; } if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */ stu= inpw(FM_A(FM_ST2U)) ; stl= inpw(FM_A(FM_ST2L)) ; - DB_GEN("Slow receive complete",0,0,6) ; - DB_GEN("stl = %x : stu = %x",stl,stu,7) ; + DB_GEN(6, "Slow receive complete"); + DB_GEN(7, "stl = %x : stu = %x", stl, stu); mac2_irq(smc,stu,stl) ; } if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */ stu= inpw(FM_A(FM_ST3U)) ; stl= inpw(FM_A(FM_ST3L)) ; - DB_GEN("FORMAC Mode Register 3",0,0,6) ; + DB_GEN(6, "FORMAC Mode Register 3"); mac3_irq(smc,stu,stl) ; } if (is & IS_TIMINT) { /* Timer 82C54-2 */ @@ -814,7 +814,7 @@ void fddi_isr(struct s_smc *smc) * Fast Tx complete Async/Sync Queue (BMU service) */ if (is & (IS_XS_F|IS_XA_F)) { - DB_GEN("Fast tx complete queue",0,0,6) ; + DB_GEN(6, "Fast tx complete queue"); /* * clear IRQ, Note: no IRQ is lost, because * we always service both queues @@ -829,7 +829,7 @@ void fddi_isr(struct s_smc *smc) * Fast Rx Complete (BMU service) */ if (is & IS_R1_F) { - DB_GEN("Fast receive complete",0,0,6) ; + DB_GEN(6, "Fast receive complete"); /* clear IRQ */ #ifndef USE_BREAK_ISR outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; @@ -1083,13 +1083,13 @@ void process_receive(struct s_smc *smc) #endif n = 0 ; do { - DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; + DB_RX(5, "Check RxD %p for OWN and EOF", r); DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl)); if (rbctrl & BMU_OWN) { NDD_TRACE("RHxE",r,rfsw,rbctrl) ; - 
DB_RX("End of RxDs",0,0,4) ; + DB_RX(4, "End of RxDs"); goto rx_end ; } /* @@ -1136,19 +1136,19 @@ void process_receive(struct s_smc *smc) rx_used-- ; } while (!(rbctrl & BMU_EOF)) ; used_frags = frag_count ; - DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ; + DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags); /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ /* BMU_ST_BUF will not be changed by the ASIC */ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { - DB_RX("Check STF bit in %x",(void *)r,0,5) ; + DB_RX(5, "Check STF bit in %p", r); r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; rx_used-- ; } - DB_RX("STF bit found",0,0,5) ; + DB_RX(5, "STF bit found"); /* * The received frame is finished for the process receive @@ -1164,7 +1164,7 @@ void process_receive(struct s_smc *smc) rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ; for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ - DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; + DB_RX(5, "dma_complete for RxD %p", r); dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } smc->hw.fp.err_stats.err_valid++ ; @@ -1173,34 +1173,34 @@ void process_receive(struct s_smc *smc) /* the length of the data including the FC */ len = (rfsw & RD_LENGTH) - 4 ; - DB_RX("frame length = %d",len,0,4) ; + DB_RX(4, "frame length = %d", len); /* * check the frame_length and all error flags */ if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){ if (rfsw & RD_S_MSRABT) { - DB_RX("Frame aborted by the FORMAC",0,0,2) ; + DB_RX(2, "Frame aborted by the FORMAC"); smc->hw.fp.err_stats.err_abort++ ; } /* * check frame status */ if (rfsw & RD_S_SEAC2) { - DB_RX("E-Indicator set",0,0,2) ; + DB_RX(2, "E-Indicator set"); smc->hw.fp.err_stats.err_e_indicator++ ; } if (rfsw & RD_S_SFRMERR) { - DB_RX("CRC error",0,0,2) ; + DB_RX(2, "CRC error"); smc->hw.fp.err_stats.err_crc++ ; } if (rfsw & RX_FS_IMPL) { - DB_RX("Implementer frame",0,0,2) ; + DB_RX(2, "Implementer frame"); smc->hw.fp.err_stats.err_imp_frame++ ; } goto abort_frame ; } if (len > FDDI_RAW_MTU-4) { - DB_RX("Frame too long error",0,0,2) ; + DB_RX(2, "Frame too long error"); smc->hw.fp.err_stats.err_too_long++ ; goto abort_frame ; } @@ -1209,12 +1209,12 @@ void process_receive(struct s_smc *smc) * of aborded frames to the BMU */ if (len <= 4) { - DB_RX("Frame length = 0",0,0,2) ; + DB_RX(2, "Frame length = 0"); goto abort_frame ; } if (len != (n-4)) { - DB_RX("BMU: rx len differs: [%d:%d]",len,n,4); + DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n); smc->os.hwm.rx_len_error++ ; goto abort_frame ; } @@ -1223,7 +1223,7 @@ void process_receive(struct s_smc *smc) * Check SA == MA */ virt = (u_char far *) rxd->rxd_virt ; - DB_RX("FC = %x",*virt,0,2) ; + DB_RX(2, "FC = %x", *virt); if (virt[12] == MA[5] && virt[11] == MA[4] && virt[10] == MA[3] && @@ -1250,7 +1250,7 @@ void process_receive(struct s_smc *smc) virt[3] != MA[2] || virt[2] != MA[1] || virt[1] != MA[0]) { - DB_RX("DA != MA and not multi- or broadcast",0,0,2) ; + DB_RX(2, "DA != MA and not multi- or broadcast"); goto abort_frame ; } } @@ -1259,13 +1259,13 @@ void process_receive(struct s_smc *smc) /* * LLC frame received */ - DB_RX("LLC - receive",0,0,4) ; + DB_RX(4, "LLC - receive"); mac_drv_rx_complete(smc,rxd,frag_count,len) ; } else { if (!(mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; - DB_RX("No SMbuf; receive terminated",0,0,4) ; + DB_RX(4, "No SMbuf; receive terminated"); goto abort_frame ; } data = smtod(mb,char *) - 1 ; @@ 
-1278,7 +1278,7 @@ void process_receive(struct s_smc *smc) #else for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; - DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; + DB_RX(6, "cp SMT frame to mb: len = %d", n); memcpy(data,r->rxd_virt,n) ; data += n ; } @@ -1294,15 +1294,15 @@ void process_receive(struct s_smc *smc) switch(fc) { case FC_SMT_INFO : smc->hw.fp.err_stats.err_smt_frame++ ; - DB_RX("SMT frame received ",0,0,5) ; + DB_RX(5, "SMT frame received"); if (smc->os.hwm.pass_SMT) { - DB_RX("pass SMT frame ",0,0,5) ; + DB_RX(5, "pass SMT frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } @@ -1310,7 +1310,7 @@ void process_receive(struct s_smc *smc) break ; case FC_SMT_NSA : smc->hw.fp.err_stats.err_smt_frame++ ; - DB_RX("SMT frame received ",0,0,5) ; + DB_RX(5, "SMT frame received"); /* if pass_NSA set pass the NSA frame or */ /* pass_SMT set and the A-Indicator */ @@ -1318,12 +1318,12 @@ void process_receive(struct s_smc *smc) if (smc->os.hwm.pass_NSA || (smc->os.hwm.pass_SMT && !(rfsw & A_INDIC))) { - DB_RX("pass SMT frame ",0,0,5) ; + DB_RX(5, "pass SMT frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } @@ -1331,12 +1331,12 @@ void process_receive(struct s_smc *smc) break ; case FC_BEACON : if (smc->os.hwm.pass_DB) { - DB_RX("pass DB frame ",0,0,5) ; + DB_RX(5, "pass DB frame"); mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count); } smt_free_mbuf(smc,mb) ; @@ -1345,9 +1345,9 @@ void process_receive(struct s_smc *smc) /* * unknown FC abord the frame */ - DB_RX("unknown FC error",0,0,2) ; + DB_RX(2, "unknown FC error"); smt_free_mbuf(smc,mb) ; - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count) ; if ((fc & 0xf0) == FC_MAC) smc->hw.fp.err_stats.err_mac_frame++ ; @@ -1358,16 +1358,16 @@ void process_receive(struct s_smc *smc) } } - DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; + DB_RX(3, "next RxD is %p", queue->rx_curr_get); NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ; continue ; /*--------------------------------------------------------------------*/ abort_frame: - DB_RX("requeue RxD",0,0,5) ; + DB_RX(5, "requeue RxD"); mac_drv_requeue_rxd(smc,rxd,frag_count) ; - DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; + DB_RX(3, "next RxD is %p", queue->rx_curr_get); NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ; } rx_end: @@ -1381,7 +1381,7 @@ static void smt_to_llc(struct s_smc *smc, SMbuf *mb) { u_char fc ; - DB_RX("send a queued frame to the llc layer",0,0,4) ; + DB_RX(4, "send a queued frame to the llc layer"); smc->os.hwm.r.len = mb->sm_len ; smc->os.hwm.r.mb_pos = smtod(mb,char *) ; fc = *smc->os.hwm.r.mb_pos ; @@ -1419,7 +1419,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, __le32 rbctrl; NDD_TRACE("RHfB",virt,len,frame_status) ; - DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; + DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status); r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; r->rxd_virt = virt ; r->rxd_rbadr = cpu_to_le32(phys) ; @@ -1475,7 +1475,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) } queue = smc->hw.fp.rx[QUEUE_R1] ; - DB_RX("clear_rx_queue",0,0,5) ; + DB_RX(5, 
"clear_rx_queue"); /* * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers @@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) r = queue->rx_curr_get ; while (queue->rx_used) { DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; - DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ; + DB_RX(5, "switch OWN bit of RxD 0x%p", r); r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; frag_count = 1 ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; @@ -1491,23 +1491,23 @@ void mac_drv_clear_rx_queue(struct s_smc *smc) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (r != queue->rx_curr_put && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { - DB_RX("Check STF bit in %x",(void *)r,0,5) ; + DB_RX(5, "Check STF bit in %p", r); r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; } - DB_RX("STF bit found",0,0,5) ; + DB_RX(5, "STF bit found"); next_rxd = r ; for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){ - DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; + DB_RX(5, "dma_complete for RxD %p", r); dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } - DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ", - (void *)queue->rx_curr_get,frag_count,5) ; + DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d", + queue->rx_curr_get, frag_count); mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ; queue->rx_curr_get = next_rxd ; @@ -1554,7 +1554,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ; smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ; smc->os.hwm.tx_len = frame_len ; - DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ; + DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len); if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) { frame_status |= LAN_TX ; } @@ -1577,23 +1577,23 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, if (!smc->hw.mac_ring_is_up) { frame_status &= ~LAN_TX ; frame_status |= RING_DOWN ; - DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; + DB_TX(2, "Ring is down: terminate LAN_TX"); } if (frag_count > smc->os.hwm.tx_p->tx_free) { #ifndef NDIS_OS2 mac_drv_clear_txd(smc) ; if (frag_count > smc->os.hwm.tx_p->tx_free) { - DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; + DB_TX(2, "Out of TxDs, terminate LAN_TX"); frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; } #else - DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; + DB_TX(2, "Out of TxDs, terminate LAN_TX"); frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; #endif } - DB_TX("frame_status = %x",frame_status,0,3) ; + DB_TX(3, "frame_status = %x", frame_status); NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ; return frame_status; } @@ -1642,10 +1642,10 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, */ t = queue->tx_curr_put ; - DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ; + DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status); if (frame_status & LAN_TX) { /* '*t' is already defined */ - DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ; + DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt); t->txd_virt = virt ; t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ; t->txd_tbadr = cpu_to_le32(phys) ; @@ -1674,11 +1674,11 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, } } if (frame_status & LOC_TX) { - DB_TX("LOC_TX: ",0,0,3) ; + DB_TX(3, "LOC_TX:"); if 
(frame_status & FIRST_FRAG) { if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; - DB_TX("No SMbuf; transmit terminated",0,0,4) ; + DB_TX(4, "No SMbuf; transmit terminated"); } else { smc->os.hwm.tx_data = @@ -1693,7 +1693,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, } if (smc->os.hwm.tx_mb) { #ifndef USE_OS_CPY - DB_TX("copy fragment into MBuf ",0,0,3) ; + DB_TX(3, "copy fragment into MBuf"); memcpy(smc->os.hwm.tx_data,virt,len) ; smc->os.hwm.tx_data += len ; #endif @@ -1718,7 +1718,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, smc->os.hwm.tx_data++ ; smc->os.hwm.tx_mb->sm_len = smc->os.hwm.tx_len - 1 ; - DB_TX("pass LLC frame to SMT ",0,0,3) ; + DB_TX(3, "pass LLC frame to SMT"); smt_received_pack(smc,smc->os.hwm.tx_mb, RD_FS_LOCAL) ; } @@ -1733,7 +1733,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, */ static void queue_llc_rx(struct s_smc *smc, SMbuf *mb) { - DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ; + DB_GEN(4, "queue_llc_rx: mb = %p", mb); smc->os.hwm.queued_rx_frames++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.llc_rx_pipe == NULL) { @@ -1763,7 +1763,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc) smc->os.hwm.queued_rx_frames-- ; smc->os.hwm.llc_rx_pipe = mb->sm_next ; } - DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ; + DB_GEN(4, "get_llc_rx: mb = 0x%p", mb); return mb; } @@ -1773,7 +1773,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc) */ static void queue_txd_mb(struct s_smc *smc, SMbuf *mb) { - DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ; + DB_GEN(4, "_rx: queue_txd_mb = %p", mb); smc->os.hwm.queued_txd_mb++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.txd_tx_pipe == NULL) { @@ -1796,7 +1796,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc) smc->os.hwm.queued_txd_mb-- ; smc->os.hwm.txd_tx_pipe = mb->sm_next ; } - DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ; + DB_GEN(4, "get_txd_mb: mb = 0x%p", mb); return mb; } @@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) __le32 tbctrl; NDD_TRACE("THSB",mb,fc,0) ; - DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ; + DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc); mb->sm_off-- ; /* set to fc */ mb->sm_len++ ; /* + fc */ @@ -1838,7 +1838,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) if (n >= len) { n = len ; } - DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ; + DB_TX(5, "frag: virt/len = 0x%p/%d", data, n); virt[frag_count] = data ; frag_len[frag_count] = n ; frag_count++ ; @@ -1863,15 +1863,15 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) { frame_status &= ~LAN_TX; if (frame_status) { - DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; + DB_TX(2, "Ring is down: terminate LAN_TX"); } else { - DB_TX("Ring is down: terminate transmission",0,0,2) ; + DB_TX(2, "Ring is down: terminate transmission"); smt_free_mbuf(smc,mb) ; return ; } } - DB_TX("frame_status = 0x%x ",frame_status,0,5) ; + DB_TX(5, "frame_status = 0x%x", frame_status); if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) { mb->sm_use_count = 2 ; @@ -1881,7 +1881,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) t = queue->tx_curr_put ; frame_status |= FIRST_FRAG ; for (i = 0; i < frag_count; i++) { - DB_TX("init TxD = 0x%x",(void *)t,0,5) ; + DB_TX(5, "init TxD = 0x%p", t); if (i == frag_count-1) { frame_status |= LAST_FRAG ; t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR | @@ 
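The new DB_TX/DB_RX bodies are wrapped in do { ... } while (0) rather than a bare if, which matters once the macro is used as an ordinary statement. A quick demonstration of the hazard the wrapper avoids; BAD_DB is a deliberately broken strawman, not taken from the driver:

#include <stdio.h>

/* Bare-if macro: under an if/else the caller's semicolon terminates
 * the outer if, so the following "else" no longer parses. */
#define BAD_DB(cond, msg)	if (cond) puts(msg)

/* do/while(0) makes the expansion a single statement everywhere. */
#define GOOD_DB(cond, msg)	do { if (cond) puts(msg); } while (0)

int main(void)
{
	int verbose = 1, ok = 0;

	if (ok)
		GOOD_DB(verbose, "Fast tx complete queue");
	else
		puts("Out of TxDs, terminate LAN_TX");

	/* Swapping GOOD_DB for BAD_DB above is a compile error. */
	return 0;
}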
-1912,7 +1912,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) } if (frame_status & LOC_TX) { - DB_TX("pass Mbuf to LLC queue",0,0,5) ; + DB_TX(5, "pass Mbuf to LLC queue"); queue_llc_rx(smc,mb) ; } @@ -1953,18 +1953,18 @@ static void mac_drv_clear_txd(struct s_smc *smc) for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; t1 = queue->tx_curr_get ; - DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ; + DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i); for ( ; ; ) { frag_count = 0 ; do { DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; - DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ; + DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1); tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl)); if (tbctrl & BMU_OWN || !queue->tx_used){ - DB_TX("End of TxDs queue %d",i,0,4) ; + DB_TX(4, "End of TxDs queue %d", i); goto free_next_queue ; /* next queue */ } t1 = t1->txd_next ; @@ -1988,11 +1988,11 @@ static void mac_drv_clear_txd(struct s_smc *smc) } else { #ifndef PASS_1ST_TXD_2_TX_COMP - DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ; + DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2); mac_drv_tx_complete(smc,t2) ; #else - DB_TX("mac_drv_tx_comp for TxD 0x%x", - queue->tx_curr_get,0,4) ; + DB_TX(4, "mac_drv_tx_comp for TxD 0x%x", + queue->tx_curr_get); mac_drv_tx_complete(smc,queue->tx_curr_get) ; #endif } @@ -2043,7 +2043,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc) for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; - DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ; + DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i); /* * switch the OWN bit of all pending frames to the host @@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc) tx_used = queue->tx_used ; while (tx_used) { DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; - DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ; + DB_TX(5, "switch OWN bit of TxD 0x%p", t); t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; t = t->txd_next ; diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 88d02d0a42c4..a9ecf923f63d 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -91,7 +91,6 @@ int p #define PC8_ACTIVE 8 #define PC9_MAINT 9 -#ifdef DEBUG /* * symbolic state names */ @@ -113,7 +112,6 @@ static const char * const pcm_events[] = { "PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT", "PC_NSE","PC_LEM" } ; -#endif #ifdef MOT_ELM /* @@ -610,12 +608,11 @@ void pcm(struct s_smc *smc, const int np, int event) mib = phy->mib ; oldstate = mib->fddiPORTPCMState ; do { - DB_PCM("PCM %c: state %s", - phy->phy_name, - (mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ; - DB_PCM("%s, event %s\n", - pcm_states[mib->fddiPORTPCMState & ~AFLAG], - pcm_events[event]) ; + DB_PCM("PCM %c: state %s%s, event %s", + phy->phy_name, + mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "", + pcm_states[mib->fddiPORTPCMState & ~AFLAG], + pcm_events[event]); state = mib->fddiPORTPCMState ; pcm_fsm(smc,phy,event) ; event = 0 ; @@ -1017,7 +1014,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) ACTIONS_DONE() ; break ; case PC9_MAINT : - DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : MAINT", phy->phy_name); /*PC90*/ if (cmd == PC_ENABLE) { GO_STATE(PC0_OFF) ; @@ -1126,13 +1123,12 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy) } if (lem->lem_errors) { - DB_PCMN(1,"LEM %c :\n",phy->np == PB? 
'B' : 'A',0) ; - DB_PCMN(1,"errors : %ld\n",lem->lem_errors,0) ; - DB_PCMN(1,"sum_errors : %ld\n",mib->fddiPORTLem_Ct,0) ; - DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ; - DB_PCMN(1,"float BER : 10E-(%d/100)\n",lem->lem_float_ber,0) ; - DB_PCMN(1,"avg. BER : 10E-%d\n", - mib->fddiPORTLer_Estimate,0) ; + DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A'); + DB_PCMN(1, "errors : %ld", lem->lem_errors); + DB_PCMN(1, "sum_errors : %ld", mib->fddiPORTLem_Ct); + DB_PCMN(1, "current BER : 10E-%d", ber / 100); + DB_PCMN(1, "float BER : 10E-(%d/100)", lem->lem_float_ber); + DB_PCMN(1, "avg. BER : 10E-%d", mib->fddiPORTLer_Estimate); } lem->lem_errors = 0L ; @@ -1160,8 +1156,8 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy) /*PC81b*/ #ifdef CONCENTRATOR - DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n", - phy->np, mib->fddiPORTLer_Cutoff) ; + DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d", + phy->np, mib->fddiPORTLer_Cutoff); #endif #ifdef SMT_EXT_CUTOFF smt_port_off_event(smc,phy->np); @@ -1213,7 +1209,7 @@ static void lem_check_lct(struct s_smc *smc, struct s_phy *phy) phy->pc_lem_fail = TRUE ; break ; } - DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ; + DB_PCMN(1, " >>errors : %lu", lem->lem_errors); } if (phy->pc_lem_fail) { mib->fddiPORTLCTFail_Ct++ ; @@ -1277,7 +1273,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) mib = phy->mib ; - DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ; + DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]); bit++ ; switch(bit) { @@ -1298,8 +1294,8 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) case 4: if (mib->fddiPORTMy_Type == TM && mib->fddiPORTNeighborType == TM) { - DB_PCMN(1,"PCM %c : E100 withhold M-M\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E100 withhold M-M", + phy->phy_name); mib->fddiPORTPC_Withhold = PC_WH_M_M ; RS_SET(smc,RS_EVENT) ; } @@ -1321,16 +1317,16 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) else { mib->fddiPORTPC_Withhold = PC_WH_OTHER ; RS_SET(smc,RS_EVENT) ; - DB_PCMN(1,"PCM %c : E101 withhold other\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E101 withhold other", + phy->phy_name); } phy->twisted = ((mib->fddiPORTMy_Type != TS) && (mib->fddiPORTMy_Type != TM) && (mib->fddiPORTNeighborType == mib->fddiPORTMy_Type)) ; if (phy->twisted) { - DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!", + phy->phy_name); } break ; case 5 : @@ -1368,7 +1364,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) if (phy->t_next[7] > smc->s.pcm_lc_medium) { start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy); } - DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ; + DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]); phy->t_next[9] = smc->s.pcm_t_next_9 ; break ; case 7: @@ -1379,8 +1375,9 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) break ; case 8: if (phy->t_val[7] || phy->r_val[7]) { - DB_PCMN(1,"PCM %c : E103 LCT fail %s\n", - phy->phy_name,phy->t_val[7]? "local":"remote") ; + DB_PCMN(1, "PCM %c : E103 LCT fail %s", + phy->phy_name, + phy->t_val[7] ? 
"local" : "remote"); queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ; } break ; @@ -1529,8 +1526,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy phy->cf_loop = FALSE ; lem_check_lct(smc,phy) ; if (phy->pc_lem_fail) { - DB_PCMN(1,"PCM %c : E104 LCT failed\n", - phy->phy_name,0) ; + DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name); phy->t_val[7] = 1 ; } else @@ -1580,7 +1576,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; break ; } - DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ; + DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]); } /* @@ -1783,13 +1779,14 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) } /*jd 05-Aug-1999 changed: Bug #10419 */ - DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag); + DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag); if (smc->e.DisconnectFlag == FALSE) { - DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason); + DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason); queue_event(smc,EVENT_PCM+np,PC_START) ; } else { - DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason); + DB_PCMN(1, "PLC %d: NO!! restart (reason %x)", + np, reason); } return ; } @@ -1810,8 +1807,8 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */ /*PC22b*/ if (!phy->tr_flag) { - DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n", - np,smc->mib.fddiSMTECMState) ; + DB_PCMN(1, "PCM : irq TRACE_PROP %d %d", + np, smc->mib.fddiSMTECMState); phy->tr_flag = TRUE ; smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ; queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ; @@ -1824,8 +1821,9 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) { /*PC22a*/ if (smc->e.path_test == PT_PASSED) { - DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np), - phy->mib->fddiPORTPCMState) ; + DB_PCMN(1, "PCM : state = %s %d", + get_pcmstate(smc, np), + phy->mib->fddiPORTPCMState); smc->e.path_test = PT_PENDING ; queue_event(smc,EVENT_ECM,EC_PATH_TEST) ; @@ -1835,9 +1833,10 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd) /* break_required (TNE > NS_Max) */ if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) { if (!phy->tr_flag) { - DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE"); - queue_event(smc,EVENT_PCM+np,PC_START) ; - return ; + DB_PCMN(1, "PCM %c : PC81 %s", + phy->phy_name, "NSE"); + queue_event(smc, EVENT_PCM + np, PC_START); + return; } } } diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 52fa162a31e0..eee447315e32 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c @@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local) SMbuf *reply ; sm = smtod(mb,struct smt_header *) ; - DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ; + DB_SMT("SMT: processing PMF frame at %p len %d", sm, mb->sm_len); #ifdef DEBUG dump_smt(smc,sm,"PMF Received") ; #endif @@ -1585,7 +1585,7 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text) dump_hex((char *) &sm->smt_source,6) ; printf(" Class %x Type %x Version %x\n", sm->smt_class,sm->smt_type,sm->smt_version) ; - printf("TID %lx\t\tSID ",sm->smt_tid) ; + printf("TID %x\t\tSID ", sm->smt_tid); dump_hex((char *) &sm->smt_sid,8) ; printf(" LEN %x\n",sm->smt_len) ; diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c index 
ef8d5672d9e8..52b22095273a 100644 --- a/drivers/net/fddi/skfp/rmt.c +++ b/drivers/net/fddi/skfp/rmt.c @@ -70,7 +70,6 @@ static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ; #define RM6_DIRECTED 6 /* sending directed beacons */ #define RM7_TRACE 7 /* trace initiated */ -#ifdef DEBUG /* * symbolic state names */ @@ -91,7 +90,6 @@ static const char * const rmt_events[] = { "RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT", "RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE" } ; -#endif /* * Globals @@ -149,10 +147,10 @@ void rmt(struct s_smc *smc, int event) int state ; do { - DB_RMT("RMT : state %s%s", - (smc->mib.m[MAC0].fddiMACRMTState & AFLAG) ? "ACTIONS " : "", - rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG]) ; - DB_RMT(" event %s\n",rmt_events[event],0) ; + DB_RMT("RMT : state %s%s event %s", + smc->mib.m[MAC0].fddiMACRMTState & AFLAG ? "ACTIONS " : "", + rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG], + rmt_events[event]); state = smc->mib.m[MAC0].fddiMACRMTState ; rmt_fsm(smc,event) ; event = 0 ; @@ -191,7 +189,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) smc->r.loop_avail = FALSE ; smc->r.sm_ma_avail = FALSE ; smc->r.no_flag = TRUE ; - DB_RMTN(1,"RMT : ISOLATED\n",0,0) ; + DB_RMTN(1, "RMT : ISOLATED"); ACTIONS_DONE() ; break ; case RM0_ISOLATED : @@ -213,7 +211,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer1(smc) ; stop_rmt_timer2(smc) ; sm_ma_control(smc,MA_BEACON) ; - DB_RMTN(1,"RMT : RING DOWN\n",0,0) ; + DB_RMTN(1, "RMT : RING DOWN"); RS_SET(smc,RS_NORINGOP) ; smc->r.sm_ma_avail = FALSE ; rmt_indication(smc,0) ; @@ -248,7 +246,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) else smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ; } - DB_RMTN(1,"RMT : RING UP\n",0,0) ; + DB_RMTN(1, "RMT : RING UP"); RS_CLEAR(smc,RS_NORINGOP) ; RS_SET(smc,RS_RINGOPCHANGE) ; rmt_indication(smc,1) ; @@ -285,7 +283,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ; start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_mac_check_beacon_claim(smc) ; - DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ; + DB_RMTN(1, "RMT : RM3_DETECT"); ACTIONS_DONE() ; break ; case RM3_DETECT : @@ -327,7 +325,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * trace ! */ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) { - DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0); + DB_RMTN(2, "RMT : DETECT && TRT_EXPIRED && T4/T5"); smc->r.bn_flag = TRUE ; /* * If one of the upstream stations beaconed @@ -344,9 +342,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * must be cleared in order to get in this condition. */ - DB_RMTN(2, - "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n", - tx,smc->r.bn_flag) ; + DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)", + tx, smc->r.bn_flag); } /*RM34a*/ else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) { @@ -378,7 +375,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ; start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_mac_check_beacon_claim(smc) ; - DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ; + DB_RMTN(1, "RMT : RM4_NON_OP_DUP"); ACTIONS_DONE() ; break ; case RM4_NON_OP_DUP : @@ -406,7 +403,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * trace ! 
*/ if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) { - DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0); + DB_RMTN(2, "RMT : NOPDUP && TRT_EXPIRED && T4/T5"); smc->r.bn_flag = TRUE ; /* * If one of the upstream stations beaconed @@ -423,9 +420,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd) * must be cleared in order to get in this condition. */ - DB_RMTN(2, - "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n", - tx,smc->r.bn_flag) ; + DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)", + tx, smc->r.bn_flag); } /*RM44c*/ else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) { @@ -448,7 +444,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer0(smc) ; stop_rmt_timer1(smc) ; stop_rmt_timer2(smc) ; - DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ; + DB_RMTN(1, "RMT : RM5_RING_OP_DUP"); ACTIONS_DONE() ; break; case RM5_RING_OP_DUP : @@ -472,7 +468,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ; sm_ma_control(smc,MA_DIRECTED) ; RS_SET(smc,RS_BEACON) ; - DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ; + DB_RMTN(1, "RMT : RM6_DIRECTED"); ACTIONS_DONE() ; break ; case RM6_DIRECTED : @@ -515,7 +511,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd) stop_rmt_timer2(smc) ; smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ; queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ; - DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ; + DB_RMTN(1, "RMT : RM7_TRACE"); ACTIONS_DONE() ; break ; case RM7_TRACE : diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index e80a08903fcf..ab939ae7e5b5 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -35,7 +35,6 @@ static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; #define SMT_TID_MAGIC 0x1f0a7b3c -#ifdef DEBUG static const char *const smt_type_name[] = { "SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??", "SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??", @@ -47,7 +46,7 @@ static const char *const smt_class_name[] = { "UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF", "SRF","PMF_GET","PMF_SET","ESF" } ; -#endif + #define LAST_CLASS (SMT_PMF_SET) static const struct fddi_addr SMT_Unknown = { @@ -203,7 +202,7 @@ void smt_agent_task(struct s_smc *smc) { smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L, EV_TOKEN(EVENT_SMT,SM_TIMER)) ; - DB_SMT("SMT agent task\n",0,0) ; + DB_SMT("SMT agent task"); } #ifndef SMT_REAL_TOKEN_CT @@ -396,7 +395,7 @@ void smt_event(struct s_smc *smc, int event) */ if (smc->sm.smt_tvu && time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) { - DB_SMT("SMT : UNA expired\n",0,0) ; + DB_SMT("SMT : UNA expired"); smc->sm.smt_tvu = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr, @@ -419,7 +418,7 @@ void smt_event(struct s_smc *smc, int event) } if (smc->sm.smt_tvd && time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) { - DB_SMT("SMT : DNA expired\n",0,0) ; + DB_SMT("SMT : DNA expired"); smc->sm.smt_tvd = 0 ; if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr, &SMT_Unknown)){ @@ -504,10 +503,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) #endif smt_swap_para(sm,(int) mb->sm_len,1) ; - DB_SMT("SMT : received packet [%s] at 0x%p\n", - smt_type_name[m_fc(mb) & 0xf],sm) ; - DB_SMT("SMT : version %d, class %s\n",sm->smt_version, - smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ; + DB_SMT("SMT : received packet [%s] at 0x%p", + smt_type_name[m_fc(mb) & 0xf], sm); + DB_SMT("SMT : version %d, class %s", + sm->smt_version, + smt_class_name[sm->smt_class > LAST_CLASS ? 
0 : sm->smt_class]); #ifdef SBA /* @@ -524,8 +524,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * ignore any packet with NSA and A-indicator set */ if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) { - DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : ignoring NSA with A-indicator set from %s", + addr_to_string(&sm->smt_source)); smt_free_mbuf(smc,mb) ; return ; } @@ -556,15 +556,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) break ; } if (illegal) { - DB_SMT("SMT : version = %d, dest = %s\n", - sm->smt_version,addr_to_string(&sm->smt_source)) ; + DB_SMT("SMT : version = %d, dest = %s", + sm->smt_version, addr_to_string(&sm->smt_source)); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ; smt_free_mbuf(smc,mb) ; return ; } if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) || ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) { - DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ; + DB_SMT("SMT: info length error, len = %d", sm->smt_len); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ; smt_free_mbuf(smc,mb) ; return ; @@ -572,7 +572,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) switch (sm->smt_class) { case SMT_NIF : if (smt_check_para(smc,sm,plist_nif)) { - DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ; + DB_SMT("SMT: NIF with para problem, ignoring"); break ; } switch (sm->smt_type) { @@ -586,8 +586,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) if (!is_equal( &smc->mib.m[MAC0].fddiMACUpstreamNbr, &sm->smt_source)) { - DB_SMT("SMT : updated my UNA = %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : updated my UNA = %s", + addr_to_string(&sm->smt_source)); if (!is_equal(&smc->mib.m[MAC0]. fddiMACUpstreamNbr,&SMT_Unknown)){ /* Do not update unknown address */ @@ -616,8 +616,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) is_individual(&sm->smt_source) && ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) || (m_fc(mb) != FC_SMT_NSA))) { - DB_SMT("SMT : replying to NIF request %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to NIF request %s", + addr_to_string(&sm->smt_source)); smt_send_nif(smc,&sm->smt_source, FC_SMT_INFO, sm->smt_tid, @@ -625,11 +625,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) } break ; case SMT_REPLY : - DB_SMT("SMT : received NIF response from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : received NIF response from %s", + addr_to_string(&sm->smt_source)); if (fs & A_INDICATOR) { smc->sm.pend[SMT_TID_NIF] = 0 ; - DB_SMT("SMT : duplicate address\n",0,0) ; + DB_SMT("SMT : duplicate address"); smc->mib.m[MAC0].fddiMACDupAddressTest = DA_FAILED ; smc->r.dup_addr_test = DA_FAILED ; @@ -644,7 +644,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) if (!is_equal( &smc->mib.m[MAC0].fddiMACDownstreamNbr, &sm->smt_source)) { - DB_SMT("SMT : updated my DNA\n",0,0) ; + DB_SMT("SMT : updated my DNA"); if (!is_equal(&smc->mib.m[MAC0]. 
fddiMACDownstreamNbr, &SMT_Unknown)){ /* Do not update unknown address */ @@ -671,11 +671,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) } else if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF_TEST]) { - DB_SMT("SMT : NIF test TID ok\n",0,0) ; + DB_SMT("SMT : NIF test TID ok"); } else { - DB_SMT("SMT : expected TID %lx, got %lx\n", - smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ; + DB_SMT("SMT : expected TID %lx, got %x", + smc->sm.pend[SMT_TID_NIF], sm->smt_tid); } break ; default : @@ -686,53 +686,53 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) case SMT_SIF_CONFIG : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Config request from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to SIF Config request from %s", + addr_to_string(&sm->smt_source)); smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_SIF_OPER : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Operation request from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT : replying to SIF Operation request from %s", + addr_to_string(&sm->smt_source)); smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_ECF : /* echo frame */ switch (sm->smt_type) { case SMT_REPLY : smc->mib.priv.fddiPRIVECF_Reply_Rx++ ; - DB_SMT("SMT: received ECF reply from %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT: received ECF reply from %s", + addr_to_string(&sm->smt_source)); if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) { - DB_SMT("SMT: ECHODATA missing\n",0,0) ; + DB_SMT("SMT: ECHODATA missing"); break ; } if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) { - DB_SMT("SMT : ECF test TID ok\n",0,0) ; + DB_SMT("SMT : ECF test TID ok"); } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) { - DB_SMT("SMT : ECF test UNA ok\n",0,0) ; + DB_SMT("SMT : ECF test UNA ok"); } else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) { - DB_SMT("SMT : ECF test DNA ok\n",0,0) ; + DB_SMT("SMT : ECF test DNA ok"); } else { - DB_SMT("SMT : expected TID %lx, got %lx\n", - smc->sm.pend[SMT_TID_ECF], - sm->smt_tid) ; + DB_SMT("SMT : expected TID %lx, got %x", + smc->sm.pend[SMT_TID_ECF], + sm->smt_tid); } break ; case SMT_REQUEST : smc->mib.priv.fddiPRIVECF_Req_Rx++ ; { if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) { - DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ; + DB_SMT("SMT: ECF with para problem,sending RDF"); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH, local) ; break ; } - DB_SMT("SMT - sending ECF reply to %s\n", - addr_to_string(&sm->smt_source),0) ; + DB_SMT("SMT - sending ECF reply to %s", + addr_to_string(&sm->smt_source)); /* set destination addr. 
& reply */ sm->smt_dest = sm->smt_source ; @@ -750,7 +750,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) #ifndef BOOT case SMT_RAF : /* resource allocation */ #ifdef ESS - DB_ESSN(2,"ESS: RAF frame received\n",0,0) ; + DB_ESSN(2, "ESS: RAF frame received"); fs = ess_raf_received_pack(smc,mb,sm,fs) ; #endif @@ -764,7 +764,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) break ; case SMT_ESF : /* extended service - not supported */ if (sm->smt_type == SMT_REQUEST) { - DB_SMT("SMT - received ESF, sending RDF\n",0,0) ; + DB_SMT("SMT - received ESF, sending RDF"); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; } break ; @@ -782,7 +782,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) */ if ((sm->smt_class == SMT_PMF_SET) && !is_individual(&sm->smt_dest)) { - DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ; + DB_SMT("SMT: ignoring PMF-SET with I/G set"); break ; } smt_pmf_received_pack(smc,mb, local) ; @@ -798,16 +798,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * we need to send a RDF frame according to 8.1.3.1.1, * only if it is a REQUEST. */ - DB_SMT("SMT : class = %d, send RDF to %s\n", - sm->smt_class, addr_to_string(&sm->smt_source)) ; + DB_SMT("SMT : class = %d, send RDF to %s", + sm->smt_class, addr_to_string(&sm->smt_source)); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; break ; #endif } if (illegal) { - DB_SMT("SMT: discarding invalid frame, reason = %d\n", - illegal,0) ; + DB_SMT("SMT: discarding invalid frame, reason = %d", illegal); } smt_free_mbuf(smc,mb) ; } @@ -869,8 +868,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason, if (sm->smt_type != SMT_REQUEST) return ; - DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n", - addr_to_string(&sm->smt_source),reason) ; + DB_SMT("SMT: sending RDF to %s,reason = 0x%x", + addr_to_string(&sm->smt_source), reason); /* @@ -1653,7 +1652,7 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short *p = list ; while (*p) { if (!sm_to_para(smc,sm,(int) *p)) { - DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); + DB_SMT("SMT: smt_check_para - missing para %hx", *p); return -1; } p++ ; @@ -1679,11 +1678,11 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) p += plen ; len -= plen ; if (len < 0) { - DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ; + DB_SMT("SMT : sm_to_para - length error %d", plen); return NULL; } if ((plen & 3) && (para != SMT_P_ECHODATA)) { - DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ; + DB_SMT("SMT : sm_to_para - odd length %d", plen); return NULL; } if (found) @@ -1937,7 +1936,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index) { int event ; int port ; - DB_SMT("SMT: action %d code %d\n",class,code) ; + DB_SMT("SMT: action %d code %d", class, code); switch(class) { case SMT_STATION_ACTION : switch(code) { diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c index 9956680402de..4e286c1ba9cd 100644 --- a/drivers/net/fddi/skfp/srf.c +++ b/drivers/net/fddi/skfp/srf.c @@ -173,7 +173,6 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index) #define THRESHOLD_2 (2*TICKS_PER_SECOND) #define THRESHOLD_32 (32*TICKS_PER_SECOND) -#ifdef DEBUG static const char * const srf_names[] = { "None","MACPathChangeEvent", "MACNeighborChangeEvent", "PORTPathChangeEvent", "PORTUndesiredConnectionAttemptEvent", @@ -182,7 +181,6 @@ static const char * const srf_names[] = { "MACNotCopiedCondition", 
"PORTEBErrorCondition", "PORTLerCondition" } ; -#endif void smt_srf_event(struct s_smc *smc, int code, int index, int cond) { @@ -198,10 +196,10 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond) } if (code) { - DB_SMT("SRF: %s index %d\n",srf_names[code],index) ; + DB_SMT("SRF: %s index %d", srf_names[code], index); if (!(evc = smt_get_evc(smc,code,index))) { - DB_SMT("SRF : smt_get_evc() failed\n",0,0) ; + DB_SMT("SRF : smt_get_evc() failed"); return ; } /* @@ -217,7 +215,7 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond) */ smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ; if (SMT_IS_CONDITION(code)) { - DB_SMT("SRF: condition is %s\n",cond ? "ON":"OFF",0) ; + DB_SMT("SRF: condition is %s", cond ? "ON" : "OFF"); if (cond) { *evc->evc_cond_state = TRUE ; evc->evc_rep_required = TRUE ; @@ -414,9 +412,9 @@ static void smt_send_srf(struct s_smc *smc) smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; - DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ; - DB_SMT("SRF: state SR%d Threshold %d\n", - smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; + DB_SMT("SRF: sending SRF at %p, len %d", smt, mb->sm_len); + DB_SMT("SRF: state SR%d Threshold %lu", + smc->srf.sr_state, smc->srf.SRThreshold / TICKS_PER_SECOND); #ifdef DEBUG dump_smt(smc,smt,"SRF Send") ; #endif diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b77e4ecf3cf2..b75d9cdcfb0c 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -57,8 +57,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *); static void fjes_tx_stall_task(struct work_struct *); static void fjes_force_close_task(struct work_struct *); static irqreturn_t fjes_intr(int, void*); -static struct rtnl_link_stats64 * -fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); +static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); static int fjes_change_mtu(struct net_device *, int); static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16); static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16); @@ -782,14 +781,12 @@ static void fjes_tx_retry(struct net_device *netdev) netif_tx_wake_queue(queue); } -static struct rtnl_link_stats64 * +static void fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct fjes_adapter *adapter = netdev_priv(netdev); memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64)); - - return stats; } static int fjes_change_mtu(struct net_device *netdev, int new_mtu) @@ -1158,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (adapter->unset_rx_last) { adapter->rx_last_jiffies = jiffies; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 99d3df788ce8..bda0c6413450 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -183,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -196,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, if (gtp0->type != GTP_TPDU) return 1; - rcu_read_lock(); pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + 
return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return 1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. */ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, @@ -225,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; - int ret = 0; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -253,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); - rcu_read_lock(); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); - ret = -1; - goto out_rcu; + return 1; } if (!gtp_check_src_ms(skb, pctx, hdrlen)) { netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - ret = -1; - goto out_rcu; + return 1; } - rcu_read_unlock(); /* Get rid of the GTP + UDP headers. */ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); -out_rcu: - rcu_read_unlock(); - return ret; } static void gtp_encap_disable(struct gtp_dev *gtp) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 3958adade7eb..d3e73ac158ae 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -34,6 +34,7 @@ #define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88 #define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89 +#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7 #define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2 #define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2 @@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ /* Fwd declaration */ struct ndis_tcp_ip_checksum_info; +struct ndis_pkt_8021q_info; /* * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame @@ -135,8 +137,10 @@ struct hv_netvsc_packet { u8 page_buf_cnt; u16 q_idx; - u32 send_buf_index; + u16 total_packets; + u32 total_bytes; + u32 send_buf_index; u32 total_data_buflen; }; @@ -155,6 +159,8 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; +#define NETVSC_HASH_KEYLEN 40 + struct rndis_device { struct net_device *ndev; @@ -165,14 +171,17 @@ struct rndis_device { spinlock_t request_lock; struct list_head req_list; - unsigned char hw_mac_adr[ETH_ALEN]; + u8 hw_mac_adr[ETH_ALEN]; + u8 rss_key[NETVSC_HASH_KEYLEN]; + u16 ind_table[ITAB_NUM]; }; /* Interface */ struct rndis_message; struct netvsc_device; -int netvsc_device_add(struct hv_device *device, void *additional_info); +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, @@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci); +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); 
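The reworked netvsc_recv_callback() above takes the receive buffer as a plain data/length pair plus const metadata pointers instead of a writable packet object, so the compiler rejects accidental writes to per-frame metadata. A toy version of that interface shape; the types and field names are invented for the example:

#include <stdio.h>

struct csum_info { unsigned int ip_ok : 1; };

static int recv_callback(const void *data, unsigned int len,
			 const struct csum_info *csum)
{
	(void)data;
	/* csum->ip_ok = 0;  would not compile: metadata is read-only */
	printf("rx %u bytes, checksum %s\n", len,
	       csum && csum->ip_ok ? "verified" : "unknown");
	return 0;
}

int main(void)
{
	unsigned char frame[60] = { 0 };
	struct csum_info ci = { .ip_ok = 1 };

	return recv_callback(frame, sizeof(frame), &ci);
}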
int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, - void *additional_info); -void rndis_filter_device_remove(struct hv_device *dev); -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel); + struct netvsc_device_info *info); +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *nvdev); +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *key, int num_queue); +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen); int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); @@ -622,6 +634,7 @@ struct nvsp_message { #define VRSS_SEND_TAB_SIZE 16 #define VRSS_CHANNEL_MAX 64 +#define VRSS_CHANNEL_DEFAULT 8 #define RNDIS_MAX_PKT_DEFAULT 8 #define RNDIS_PKT_ALIGN_DEFAULT 8 @@ -685,8 +698,7 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ - struct netvsc_stats __percpu *tx_stats; - struct netvsc_stats __percpu *rx_stats; + u32 tx_checksum_mask; /* Ethtool settings */ u8 duplex; @@ -705,11 +717,21 @@ struct net_device_context { u32 vf_serial; }; +/* Per channel data */ +struct netvsc_channel { + struct vmbus_channel *channel; + struct multi_send_data msd; + struct multi_recv_comp mrc; + atomic_t queue_sends; + + struct netvsc_stats tx_stats; + struct netvsc_stats rx_stats; +}; + /* Per netvsc device */ struct netvsc_device { u32 nvsp_version; - atomic_t num_outstanding_sends; wait_queue_head_t wait_drain; bool destroy; @@ -735,32 +757,25 @@ struct netvsc_device { struct nvsp_message revoke_packet; - struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX]; u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; spinlock_t sc_lock; /* Protects num_sc_offered variable */ u32 num_sc_offered; - atomic_t queue_sends[VRSS_CHANNEL_MAX]; /* Holds rndis device info */ void *extension; int ring_size; - /* The primary channel callback buffer */ - unsigned char *cb_buffer; - /* The sub channel callback buffer */ - unsigned char *sub_cb_buf; - - struct multi_send_data msd[VRSS_CHANNEL_MAX]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 
8 */ - struct multi_recv_comp mrc[VRSS_CHANNEL_MAX]; atomic_t num_outstanding_recvs; atomic_t open_cnt; + + struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; }; static inline struct netvsc_device * @@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info { }; }; -struct ndis_oject_header { +struct ndis_object_header { u8 type; u8 revision; u16 size; @@ -947,6 +962,9 @@ struct ndis_oject_header { #define NDIS_OBJECT_TYPE_DEFAULT 0x80 #define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2 +#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1 + #define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2 @@ -973,8 +991,135 @@ struct ndis_oject_header { #define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */ #define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */ +/* + * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_OFFLOAD + */ + +#define NDIS_OFFLOAD_ENCAP_NONE 0x0000 +#define NDIS_OFFLOAD_ENCAP_NULL 0x0001 +#define NDIS_OFFLOAD_ENCAP_8023 0x0002 +#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004 +#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008 +#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010 + +struct ndis_csum_offload { + u32 ip4_txenc; + u32 ip4_txcsum; +#define NDIS_TXCSUM_CAP_IP4OPT 0x001 +#define NDIS_TXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP4 0x010 +#define NDIS_TXCSUM_CAP_UDP4 0x040 +#define NDIS_TXCSUM_CAP_IP4 0x100 + +#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT) + + u32 ip4_rxenc; + u32 ip4_rxcsum; +#define NDIS_RXCSUM_CAP_IP4OPT 0x001 +#define NDIS_RXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP4 0x010 +#define NDIS_RXCSUM_CAP_UDP4 0x040 +#define NDIS_RXCSUM_CAP_IP4 0x100 + u32 ip6_txenc; + u32 ip6_txcsum; +#define NDIS_TXCSUM_CAP_IP6EXT 0x001 +#define NDIS_TXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP6 0x010 +#define NDIS_TXCSUM_CAP_UDP6 0x040 + u32 ip6_rxenc; + u32 ip6_rxcsum; +#define NDIS_RXCSUM_CAP_IP6EXT 0x001 +#define NDIS_RXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP6 0x010 +#define NDIS_RXCSUM_CAP_UDP6 0x040 + +#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \ + NDIS_TXCSUM_CAP_TCP6OPT | \ + NDIS_TXCSUM_CAP_IP6EXT) +}; + +struct ndis_lsov1_offload { + u32 encap; + u32 maxsize; + u32 minsegs; + u32 opts; +}; + +struct ndis_ipsecv1_offload { + u32 encap; + u32 ah_esp; + u32 xport_tun; + u32 ip4_opts; + u32 flags; + u32 ip4_ah; + u32 ip4_esp; +}; + +struct ndis_lsov2_offload { + u32 ip4_encap; + u32 ip4_maxsz; + u32 ip4_minsg; + u32 ip6_encap; + u32 ip6_maxsz; + u32 ip6_minsg; + u32 ip6_opts; +#define NDIS_LSOV2_CAP_IP6EXT 0x001 +#define NDIS_LSOV2_CAP_TCP6OPT 0x004 + +#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \ + NDIS_LSOV2_CAP_TCP6OPT) +}; + +struct ndis_ipsecv2_offload { + u32 encap; + u16 ip6; + u16 ip4opt; + u16 ip6ext; + u16 ah; + u16 esp; + u16 ah_esp; + u16 xport; + u16 tun; + u16 xport_tun; + u16 lso; + u16 extseq; + u32 udp_esp; + u32 auth; + u32 crypto; + u32 sa_caps; +}; + +struct ndis_rsc_offload { + u16 ip4; + u16 ip6; +}; + +struct ndis_encap_offload { + u32 flags; + u32 maxhdr; +}; + +struct ndis_offload { + struct ndis_object_header header; + struct ndis_csum_offload csum; + struct ndis_lsov1_offload lsov1; + struct ndis_ipsecv1_offload ipsecv1; + struct ndis_lsov2_offload lsov2; + u32 flags; + /* NDIS >= 6.1 */ + struct ndis_ipsecv2_offload ipsecv2; + /* NDIS >= 6.30 */ + struct ndis_rsc_offload rsc; + struct ndis_encap_offload encap_gre; +}; + 
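struct ndis_offload above only grows at its tail across NDIS revisions (ipsecv2 from NDIS 6.1, rsc and encap_gre from 6.30), so the size macros added next can truncate a capability query with offsetof(). A standalone illustration of the same trick, with toy fields (printed values are build-dependent):

    #include <stdio.h>
    #include <stddef.h>

    /* Toy model of a tail-extensible capability struct, mirroring how
     * NDIS_OFFLOAD_SIZE_6_0/6_1 below truncate struct ndis_offload.
     */
    struct caps {
            unsigned int base;    /* present in every revision */
            unsigned int v2_only; /* added in revision 2 */
            unsigned int v3_only; /* added in revision 3 */
    };

    #define CAPS_SIZE_V1 offsetof(struct caps, v2_only)
    #define CAPS_SIZE_V2 offsetof(struct caps, v3_only)
    #define CAPS_SIZE_V3 sizeof(struct caps)

    int main(void)
    {
            printf("v1=%zu v2=%zu v3=%zu\n",
                   CAPS_SIZE_V1, CAPS_SIZE_V2, CAPS_SIZE_V3);
            return 0;
    }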
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload) +#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2) +#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc) + struct ndis_offload_params { - struct ndis_oject_header header; + struct ndis_object_header header; u8 ip_v4_csum; u8 tcp_ip_v4_csum; u8 udp_ip_v4_csum; @@ -1301,15 +1446,10 @@ struct rndis_message { #define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400 #define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800 -#define INFO_IPV4 2 -#define INFO_IPV6 4 -#define INFO_TCP 2 -#define INFO_UDP 4 - #define TRANSPORT_INFO_NOT_IP 0 -#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP) -#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP) -#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP) +#define TRANSPORT_INFO_IPV4_TCP 0x01 +#define TRANSPORT_INFO_IPV4_UDP 0x02 +#define TRANSPORT_INFO_IPV6_TCP 0x10 +#define TRANSPORT_INFO_IPV6_UDP 0x20 #endif /* _HYPERV_NET_H */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 86e5749226ef..f260e38b2f66 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); - if (!net_device->cb_buffer) { - kfree(net_device); - return NULL; - } - - net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + net_device->chan_table[0].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; @@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->mrc[i].buf); + vfree(nvdev->chan_table[i].mrc.buf); - kfree(nvdev->cb_buffer); kfree(nvdev); } -static struct netvsc_device *get_outbound_net_device(struct hv_device *device) -{ - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (net_device && net_device->destroy) - net_device = NULL; +static inline bool netvsc_channel_idle(const struct netvsc_device *net_device, + u16 q_idx) +{ + const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; - return net_device; + return atomic_read(&net_device->num_outstanding_recvs) == 0 && + atomic_read(&nvchan->queue_sends) == 0; } -static struct netvsc_device *get_inbound_net_device(struct hv_device *device) +static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device = hv_device_to_netvsc_device(device); - if (!net_device) - goto get_in_err; - - if (net_device->destroy && - atomic_read(&net_device->num_outstanding_sends) == 0 && - atomic_read(&net_device->num_outstanding_recvs) == 0) + if (net_device && net_device->destroy) net_device = NULL; -get_in_err: return net_device; } @@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device) vmbus_close(device->channel); /* Release all resources */ - vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); } @@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); struct vmbus_channel *channel = device->channel; - int num_outstanding_sends; u16 q_idx = 0; int queue_sends; /* Notify the layer above us */ if (likely(skb)) { - 
struct hv_netvsc_packet *nvsc_packet + const struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; - u32 send_index = nvsc_packet->send_buf_index; + u32 send_index = packet->send_buf_index; + struct netvsc_stats *tx_stats; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); - q_idx = nvsc_packet->q_idx; + q_idx = packet->q_idx; channel = incoming_channel; + tx_stats = &net_device->chan_table[q_idx].tx_stats; + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets += packet->total_packets; + tx_stats->bytes += packet->total_bytes; + u64_stats_update_end(&tx_stats->syncp); + dev_consume_skb_any(skb); } - num_outstanding_sends = - atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]); + queue_sends = + atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if (net_device->destroy && num_outstanding_sends == 0) + if (net_device->destroy && queue_sends == 0) wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && @@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device, static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) { - unsigned long index; - u32 max_words = net_device->map_words; - unsigned long *map_addr = (unsigned long *)net_device->send_section_map; - u32 section_cnt = net_device->send_section_cnt; - int ret_val = NETVSC_INVALID_INDEX; - int i; - int prev_val; - - for (i = 0; i < max_words; i++) { - if (!~(map_addr[i])) - continue; - index = ffz(map_addr[i]); - prev_val = sync_test_and_set_bit(index, &map_addr[i]); - if (prev_val) - continue; - if ((index + (i * BITS_PER_LONG)) >= section_cnt) - break; - ret_val = (index + (i * BITS_PER_LONG)); - break; + unsigned long *map_addr = net_device->send_section_map; + unsigned int i; + + for_each_clear_bit(i, map_addr, net_device->map_words) { + if (sync_test_and_set_bit(i, map_addr) == 0) + return i; } - return ret_val; + + return NETVSC_INVALID_INDEX; } static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, @@ -765,9 +745,11 @@ static inline int netvsc_send_pkt( struct sk_buff *skb) { struct nvsp_message nvmsg; - u16 q_idx = packet->q_idx; - struct vmbus_channel *out_channel = net_device->chn_table[q_idx]; + struct netvsc_channel *nvchan + = &net_device->chan_table[packet->q_idx]; + struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); + struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; struct hv_page_buffer *pgbuf; @@ -827,23 +809,14 @@ static inline int netvsc_send_pkt( } if (ret == 0) { - atomic_inc(&net_device->num_outstanding_sends); - atomic_inc(&net_device->queue_sends[q_idx]); - - if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { - netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx)); + atomic_inc_return(&nvchan->queue_sends); - if (atomic_read(&net_device-> - queue_sends[q_idx]) < 1) - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); - } + if (ring_avail < RING_AVAIL_PERCENT_LOWATER) + netif_tx_stop_queue(txq); } else if (ret == -EAGAIN) { - netif_tx_stop_queue(netdev_get_tx_queue( - ndev, q_idx)); - if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); + netif_tx_stop_queue(txq); + if (atomic_read(&nvchan->queue_sends) < 1) { + netif_tx_wake_queue(txq); ret = -ENOSPC; } } else { @@ -874,8 +847,7 @@ int netvsc_send(struct 
hv_device *device, { struct netvsc_device *net_device; int ret = 0; - struct vmbus_channel *out_channel; - u16 q_idx = packet->q_idx; + struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; unsigned int section_index = NETVSC_INVALID_INDEX; struct multi_send_data *msdp; @@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device, if (!net_device->send_section_map) return -EAGAIN; - out_channel = net_device->chn_table[q_idx]; - + nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device, goto send_now; } - msdp = &net_device->msd[q_idx]; - /* batch packets in send buffer if possible */ + msdp = &nvchan->msd; if (msdp->pkt) msd_len = msdp->pkt->total_data_buflen; @@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device, packet->total_data_buflen += msd_len; } + if (msdp->pkt) { + packet->total_packets += msdp->pkt->total_packets; + packet->total_bytes += msdp->pkt->total_bytes; + } + if (msdp->skb) dev_consume_skb_any(msdp->skb); @@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel, static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, u32 *filled, u32 *avail) { - u32 first = nvdev->mrc[q_idx].first; - u32 next = nvdev->mrc[q_idx].next; + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; + u32 first = mrc->first; + u32 next = mrc->next; *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next : next - first; @@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; count_recv_comp_slot(nvdev, q_idx, &filled, &avail); if (!filled) return NULL; - return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first * - sizeof(struct recv_comp_data); + return mrc->buf + mrc->first * sizeof(struct recv_comp_data); } /* Put the first filled slot back to available pool */ static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; int num_recv; - nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) % - NETVSC_RECVSLOT_MAX; + mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); @@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, static inline struct recv_comp_data *get_recv_comp_slot( struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) { + struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; u32 filled, avail, next; struct recv_comp_data *rcd; - if (!nvdev->recv_section) + if (unlikely(!nvdev->recv_section)) return NULL; - if (!nvdev->mrc[q_idx].buf) + if (unlikely(!mrc->buf)) return NULL; if (atomic_read(&nvdev->num_outstanding_recvs) > @@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot( if (!avail) return NULL; - next = nvdev->mrc[q_idx].next; - rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data); - nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX; + next = mrc->next; + rcd = mrc->buf + next * sizeof(struct recv_comp_data); + mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; atomic_inc(&nvdev->num_outstanding_recvs); return rcd; } 
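The receive-completion ring handled above keeps two indices, first and next, and derives occupancy with modular arithmetic; count_recv_comp_slot() reserves one slot so that first == next always means "empty", never "full". A self-contained sketch of the same accounting, where SLOT_MAX stands in for NETVSC_RECVSLOT_MAX:

    #include <assert.h>

    #define SLOT_MAX 8 /* stand-in for NETVSC_RECVSLOT_MAX */

    struct ring {
            unsigned int first; /* oldest filled slot */
            unsigned int next;  /* next free slot */
    };

    /* Same arithmetic as count_recv_comp_slot(): one slot is kept
     * unused so the empty and full states stay distinguishable.
     */
    static unsigned int ring_filled(const struct ring *r)
    {
            return (r->first > r->next) ? SLOT_MAX - r->first + r->next
                                        : r->next - r->first;
    }

    static unsigned int ring_avail(const struct ring *r)
    {
            return SLOT_MAX - ring_filled(r) - 1;
    }

    int main(void)
    {
            struct ring r = { .first = 6, .next = 2 }; /* wrapped around */

            assert(ring_filled(&r) == 4);
            assert(ring_avail(&r) == 3);
            return 0;
    }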
-static void netvsc_receive(struct netvsc_device *net_device, - struct vmbus_channel *channel, - struct hv_device *device, - struct vmpacket_descriptor *packet) +static void netvsc_receive(struct net_device *ndev, + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + struct vmtransfer_page_packet_header *vmxferpage_packet, + struct nvsp_message *nvsp) { - struct vmtransfer_page_packet_header *vmxferpage_packet; - struct nvsp_message *nvsp_packet; - struct hv_netvsc_packet nv_pkt; - struct hv_netvsc_packet *netvsc_packet = &nv_pkt; + char *recv_buf = net_device->recv_buf; u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - struct net_device *ndev = hv_get_drvdata(device); - void *data; int ret; struct recv_comp_data *rcd; u16 q_idx = channel->offermsg.offer.sub_channel_index; - /* - * All inbound packets other than send completion should be xfer page - * packet - */ - if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) { - netdev_err(ndev, "Unknown packet type received - %d\n", - packet->type); - return; - } - - nvsp_packet = (struct nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); - /* Make sure this is a valid nvsp packet */ - if (nvsp_packet->hdr.msg_type != - NVSP_MSG1_TYPE_SEND_RNDIS_PKT) { - netdev_err(ndev, "Unknown nvsp packet type received-" - " %d\n", nvsp_packet->hdr.msg_type); + if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { + netif_err(net_device_ctx, rx_err, ndev, + "Unknown nvsp packet type received %u\n", + nvsp->hdr.msg_type); return; } - vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet; - - if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) { - netdev_err(ndev, "Invalid xfer page set id - " - "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, - vmxferpage_packet->xfer_pageset_id); + if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { + netif_err(net_device_ctx, rx_err, ndev, + "Invalid xfer page set id - expecting %x got %x\n", + NETVSC_RECEIVE_BUFFER_ID, + vmxferpage_packet->xfer_pageset_id); return; } @@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device, /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { - /* Initialize the netvsc packet */ - data = (void *)((unsigned long)net_device-> - recv_buf + vmxferpage_packet->ranges[i].byte_offset); - netvsc_packet->total_data_buflen = - vmxferpage_packet->ranges[i].byte_count; + void *data = recv_buf + + vmxferpage_packet->ranges[i].byte_offset; + u32 buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ - status = rndis_filter_receive(device, netvsc_packet, &data, - channel); + status = rndis_filter_receive(ndev, net_device, device, + channel, data, buflen); } - if (!net_device->mrc[q_idx].buf) { + if (!net_device->chan_table[q_idx].mrc.buf) { ret = netvsc_send_recv_completion(channel, vmxferpage_packet->d.trans_id, status); @@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device, u64 request_id, struct vmpacket_descriptor *desc) { - struct nvsp_message *nvmsg; struct net_device_context *net_device_ctx = netdev_priv(ndev); - - nvmsg = (struct nvsp_message *)((unsigned long) - desc + (desc->offset8 << 3)); + struct nvsp_message *nvmsg + = (struct nvsp_message *)((unsigned long)desc + + (desc->offset8 << 3)); switch (desc->type) { case VM_PKT_COMP: @@ -1255,7 +1213,10 @@ static void 
netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_USING_XFER_PAGES: - netvsc_receive(net_device, channel, device, desc); + netvsc_receive(ndev, net_device, net_device_ctx, + device, channel, + (struct vmtransfer_page_packet_header *)desc, + nvmsg); break; case VM_PKT_DATA_INBAND: @@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device, void netvsc_channel_cb(void *context) { - int ret; - struct vmbus_channel *channel = (struct vmbus_channel *)context; + struct vmbus_channel *channel = context; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct hv_device *device; struct netvsc_device *net_device; - u32 bytes_recvd; - u64 request_id; struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; bool need_to_commit = false; @@ -1289,74 +1245,29 @@ void netvsc_channel_cb(void *context) else device = channel->device_obj; - net_device = get_inbound_net_device(device); - if (!net_device) - return; ndev = hv_get_drvdata(device); - buffer = get_per_channel_state(channel); + if (unlikely(!ndev)) + return; + + net_device = net_device_to_netvsc_device(ndev); + if (unlikely(net_device->destroy) && + netvsc_channel_idle(net_device, q_idx)) + return; /* commit_rd_index() -> hv_signal_on_read() needs this. */ init_cached_read_index(channel); - do { - desc = get_next_pkt_raw(channel); - if (desc != NULL) { - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - desc->trans_id, - desc); - - put_pkt_raw(channel, desc); - need_to_commit = true; - continue; - } - if (need_to_commit) { - need_to_commit = false; - commit_rd_index(channel); - } - - ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, - &bytes_recvd, &request_id); - if (ret == 0) { - if (bytes_recvd > 0) { - desc = (struct vmpacket_descriptor *)buffer; - netvsc_process_raw_pkt(device, - channel, - net_device, - ndev, - request_id, - desc); - } else { - /* - * We are done for this pass. 
- */ - break; - } - - } else if (ret == -ENOBUFS) { - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); - /* Handle large packet */ - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (buffer == NULL) { - /* Try again next time around */ - netdev_err(ndev, - "unable to allocate buffer of size " - "(%d)!!\n", bytes_recvd); - break; - } - - bufferlen = bytes_recvd; - } + while ((desc = get_next_pkt_raw(channel)) != NULL) { + netvsc_process_raw_pkt(device, channel, net_device, + ndev, desc->trans_id, desc); + put_pkt_raw(channel, desc); + need_to_commit = true; init_cached_read_index(channel); + } - } while (1); - - if (bufferlen > NETVSC_PACKET_SIZE) - kfree(buffer); + if (need_to_commit) + commit_rd_index(channel); netvsc_chk_recv_comp(net_device, channel, q_idx); } @@ -1365,11 +1276,11 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, void *additional_info) +int netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; - int ring_size = - ((struct netvsc_device_info *)additional_info)->ring_size; + int ring_size = device_info->ring_size; struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -1380,8 +1291,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - set_per_channel_state(device->channel, net_device->cb_buffer); - /* Open the channel */ ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, ring_size * PAGE_SIZE, NULL, 0, @@ -1400,7 +1309,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) * opened. */ for (i = 0; i < VRSS_CHANNEL_MAX; i++) - net_device->chn_table[i] = device->channel; + net_device->chan_table[i].channel = device->channel; /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is * populated. diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fcab8019dda0..72b0c1f7496e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,21 +42,11 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) -#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_TSO6 | \ - NETIF_F_HW_CSUM) - -/* Restrict GSO size to account for NVGRE */ -#define NETVSC_GSO_MAX_SIZE 62768 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); -static int max_num_vrss_chns = 8; - static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | @@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net) while (true) { aread = 0; for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chn_table[i]; + chn = nvdev->chan_table[i].channel; if (!chn) continue; @@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } +/* + * Select queue for transmit. + * + * If a valid queue has already been assigned, then use that. + * Otherwise compute tx queue based on hash and the send table. + * + * This is basically similar to default (__netdev_pick_tx) with the added step + * of using the host send_table when no other queue has been assigned. 
+ * + * TODO support XPS - but get_xps_queue not exported + */ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct netvsc_device *nvsc_dev = net_device_ctx->nvdev; - u32 hash; - u16 q_idx = 0; + struct sock *sk = skb->sk; + int q_idx = sk_tx_queue_get(sk); - if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) - return 0; + if (q_idx < 0 || skb->ooo_okay || + q_idx >= ndev->real_num_tx_queues) { + u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE); + int new_idx; + + new_idx = nvsc_dev->send_table[hash] + % nvsc_dev->num_chn; + + if (q_idx != new_idx && sk && + sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, new_idx); - hash = skb_get_hash(skb); - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % - ndev->real_num_tx_queues; + q_idx = new_idx; + } - if (!nvsc_dev->chn_table[q_idx]) + if (unlikely(!nvsc_dev->chan_table[q_idx].channel)) q_idx = 0; return q_idx; @@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb) return slots + frag_slots; } -static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off) +static u32 net_checksum_info(struct sk_buff *skb) { - u32 ret_val = TRANSPORT_INFO_NOT_IP; - - if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) && - (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) { - goto not_ip; - } + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip = ip_hdr(skb); - *trans_off = skb_transport_offset(skb); - - if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) { - struct iphdr *iphdr = ip_hdr(skb); - - if (iphdr->protocol == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV4_TCP; - else if (iphdr->protocol == IPPROTO_UDP) - ret_val = TRANSPORT_INFO_IPV4_UDP; + if (ip->protocol == IPPROTO_TCP) + return TRANSPORT_INFO_IPV4_TCP; + else if (ip->protocol == IPPROTO_UDP) + return TRANSPORT_INFO_IPV4_UDP; } else { - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - ret_val = TRANSPORT_INFO_IPV6_TCP; + struct ipv6hdr *ip6 = ipv6_hdr(skb); + + if (ip6->nexthdr == IPPROTO_TCP) + return TRANSPORT_INFO_IPV6_TCP; else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) - ret_val = TRANSPORT_INFO_IPV6_UDP; + return TRANSPORT_INFO_IPV6_UDP; } -not_ip: - return ret_val; + return TRANSPORT_INFO_NOT_IP; } static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) @@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct rndis_packet *rndis_pkt; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; - struct ndis_tcp_ip_checksum_info *csum_info; - int hdr_offset; - u32 net_trans_info; u32 hash; - u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; @@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * more pages we try linearizing it. 
*/ - skb_length = skb->len; num_data_pgs = netvsc_get_slots(skb) + 2; if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { @@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; + packet->total_bytes = skb->len; + packet->total_packets = 1; rndis_msg = (struct rndis_message *)skb->head; @@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) VLAN_PRIO_SHIFT; } - net_trans_info = get_net_transport_info(skb, &hdr_offset); - - /* - * Setup the sendside checksum offload only if this is not a - * GSO packet. - */ - if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { + if (skb_is_gso(skb)) { struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; @@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ppi->ppi_offset); lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; - if (net_trans_info & (INFO_IPV4 << 16)) { + if (skb->protocol == htons(ETH_P_IP)) { lso_info->lso_v2_transmit.ip_version = NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip_hdr(skb)->tot_len = 0; @@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; + lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (net_trans_info & INFO_TCP) { + if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { + struct ndis_tcp_ip_checksum_info *csum_info; + rndis_msg_size += NDIS_CSUM_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, TCPIP_CHKSUM_PKTINFO); @@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + ppi->ppi_offset); - if (net_trans_info & (INFO_IPV4 << 16)) + csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); + + if (skb->protocol == htons(ETH_P_IP)) { csum_info->transmit.is_ipv4 = 1; - else + + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } else { csum_info->transmit.is_ipv6 = 1; - csum_info->transmit.tcp_checksum = 1; - csum_info->transmit.tcp_header_offset = hdr_offset; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + csum_info->transmit.tcp_checksum = 1; + else + csum_info->transmit.udp_checksum = 1; + } } else { - /* UDP checksum (and other) offload is not supported. 
*/ + /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) goto drop; } @@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); - if (likely(ret == 0)) { - struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->packets++; - tx_stats->bytes += skb_length; - u64_stats_update_end(&tx_stats->syncp); + if (likely(ret == 0)) return NETDEV_TX_OK; - } if (ret == -EAGAIN) { ++net_device_ctx->eth_stats.tx_busy; @@ -541,7 +538,6 @@ no_memory: ++net_device_ctx->eth_stats.tx_no_memory; goto drop; } - /* * netvsc_linkstatus_callback - Link up/down notification */ @@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, } static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, - struct hv_netvsc_packet *packet, - struct ndis_tcp_ip_checksum_info *csum_info, - void *data, u16 vlan_tci) + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan, + void *data, u32 buflen) { struct sk_buff *skb; - skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); + skb = netdev_alloc_skb_ip_align(net, buflen); if (!skb) return skb; @@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ - memcpy(skb_put(skb, packet->total_data_buflen), data, - packet->total_data_buflen); + memcpy(skb_put(skb, buflen), data, buflen); skb->protocol = eth_type_trans(skb, net); @@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (vlan_tci & VLAN_TAG_PRESENT) + if (vlan) { + u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + } return skb; } @@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * netvsc_recv_callback - Callback when we receive a packet from the * "wire" on the specified device. */ -int netvsc_recv_callback(struct hv_device *device_obj, - struct hv_netvsc_packet *packet, - void **data, - struct ndis_tcp_ip_checksum_info *csum_info, - struct vmbus_channel *channel, - u16 vlan_tci) +int netvsc_recv_callback(struct net_device *net, + struct vmbus_channel *channel, + void *data, u32 len, + const struct ndis_tcp_ip_checksum_info *csum_info, + const struct ndis_pkt_8021q_info *vlan) { - struct net_device *net = hv_get_drvdata(device_obj); struct net_device_context *net_device_ctx = netdev_priv(net); + struct netvsc_device *net_device = net_device_ctx->nvdev; struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; + u16 q_idx = channel->offermsg.offer.sub_channel_index; + if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; @@ -665,7 +664,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? 
*/ - skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); + skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len); if (unlikely(!skb)) { ++net->stats.rx_dropped; rcu_read_unlock(); @@ -673,18 +672,17 @@ int netvsc_recv_callback(struct hv_device *device_obj, } if (net != vf_netdev) - skb_record_rx_queue(skb, - channel->offermsg.offer.sub_channel_index); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics * on the synthetic device because modifying the VF device * statistics will not work correctly. */ - rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); + rx_stats = &net_device->chan_table[q_idx].rx_stats; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; - rx_stats->bytes += packet->total_data_buflen; + rx_stats->bytes += len; if (skb->pkt_type == PACKET_BROADCAST) ++rx_stats->broadcast; @@ -697,7 +695,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, * is done. * TODO - use NAPI? */ - netif_rx(skb); + netif_receive_skb(skb); rcu_read_unlock(); return 0; @@ -722,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, + u32 num_chn) +{ + struct netvsc_device_info device_info; + int ret; + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = num_chn; + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = num_chn; + + ret = rndis_filter_device_add(dev, &device_info); + if (ret) + return ret; + + ret = netif_set_real_num_tx_queues(net, num_chn); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(net, num_chn); + + return ret; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; - struct netvsc_device_info device_info; - u32 num_chn; - u32 max_chn; - int ret = 0; - bool recovering = false; + unsigned int count = channels->combined_count; + int ret; + + /* We do not support separate count for rx, tx, or other */ + if (count == 0 || + channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (count > net->num_tx_queues || count > net->num_rx_queues) + return -EINVAL; if (net_device_ctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; - num_chn = nvdev->num_chn; - max_chn = min_t(u32, nvdev->max_chn, num_online_cpus()); - - if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) { - pr_info("vRSS unsupported before NVSP Version 5\n"); + if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) return -EINVAL; - } - /* We do not support rx, tx, or other */ - if (!channels || - channels->rx_count || - channels->tx_count || - channels->other_count || - (channels->combined_count < 1)) + if (count > nvdev->max_chn) return -EINVAL; - if (channels->combined_count > max_chn) { - pr_info("combined channels too high, using %d\n", max_chn); - channels->combined_count = max_chn; - } - ret = netvsc_close(net); if (ret) - goto out; + return ret; - do_set: net_device_ctx->start_remove = true; - rndis_filter_device_remove(dev); - - nvdev->num_chn = channels->combined_count; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = max_num_vrss_chns; + rndis_filter_device_remove(dev, nvdev); - ret = 
rndis_filter_device_add(dev, &device_info); - if (ret) { - if (recovering) { - netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - return ret; - } - goto recover; - } - - nvdev = net_device_ctx->nvdev; - - ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set tx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } - - ret = netif_set_real_num_rx_queues(net, nvdev->num_chn); - if (ret) { - if (recovering) { - netdev_err(net, "could not set rx queue count (ret %d)\n", ret); - return ret; - } - goto recover; - } + ret = netvsc_set_queues(net, dev, count); + if (ret == 0) + nvdev->num_chn = count; + else + netvsc_set_queues(net, dev, nvdev->num_chn); - out: netvsc_open(net); net_device_ctx->start_remove = false; + /* We may have missed link change notifications */ schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; - - recover: - /* If the above failed, we attempt to recover through the same - * process but with the original number of channels. - */ - netdev_err(net, "could not set channels, recovering\n"); - recovering = true; - channels->combined_count = num_chn; - goto do_set; } static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) @@ -878,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = ndevctx->nvdev; struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - u32 num_chn; - int ret = 0; + int ret; if (ndevctx->start_remove || !nvdev || nvdev->destroy) return -ENODEV; @@ -888,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto out; - num_chn = nvdev->num_chn; - ndevctx->start_remove = true; - rndis_filter_device_remove(hdev); + rndis_filter_device_remove(hdev, nvdev); ndev->mtu = mtu; memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.num_chn = num_chn; - device_info.max_num_vrss_chns = max_num_vrss_chns; + device_info.num_chn = nvdev->num_chn; + device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_add(hdev, &device_info); out: @@ -911,38 +880,43 @@ out: return ret; } -static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, - struct rtnl_link_stats64 *t) +static void netvsc_get_stats64(struct net_device *net, + struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - int cpu; - - for_each_possible_cpu(cpu) { - struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats, - cpu); - struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats, - cpu); - u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast; + struct netvsc_device *nvdev = ndev_ctx->nvdev; + int i; + + if (!nvdev) + return; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + const struct netvsc_stats *stats; + u64 packets, bytes, multicast; unsigned int start; + stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&tx_stats->syncp); - tx_packets = tx_stats->packets; - tx_bytes = tx_stats->bytes; - } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + t->tx_bytes += bytes; + t->tx_packets += packets; + + stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&rx_stats->syncp); - rx_packets = 
rx_stats->packets; - rx_bytes = rx_stats->bytes; - rx_multicast = rx_stats->multicast + rx_stats->broadcast; - } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); - - t->tx_bytes += tx_bytes; - t->tx_packets += tx_packets; - t->rx_bytes += rx_bytes; - t->rx_packets += rx_packets; - t->multicast += rx_multicast; + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + multicast = stats->multicast + stats->broadcast; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + t->rx_bytes += bytes; + t->rx_packets += packets; + t->multicast += multicast; } t->tx_dropped = net->stats.tx_dropped; @@ -950,8 +924,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, t->rx_dropped = net->stats.rx_dropped; t->rx_errors = net->stats.rx_errors; - - return t; } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) @@ -989,11 +961,19 @@ static const struct { { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, }; +#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) + +/* 4 statistics per queue (rx/tx packets/bytes) */ +#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) + static int netvsc_get_sset_count(struct net_device *dev, int string_set) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + switch (string_set) { case ETH_SS_STATS: - return ARRAY_SIZE(netvsc_stats); + return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -1003,24 +983,107 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; const void *nds = &ndc->eth_stats; - int i; + const struct netvsc_stats *qstats; + unsigned int start; + u64 packets, bytes; + int i, j; - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + + for (j = 0; j < nvdev->num_chn; j++) { + qstats = &nvdev->chan_table[j].tx_stats; + + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + + qstats = &nvdev->chan_table[j].rx_stats; + do { + start = u64_stats_fetch_begin_irq(&qstats->syncp); + packets = qstats->packets; + bytes = qstats->bytes; + } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + data[i++] = packets; + data[i++] = bytes; + } } static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, + memcpy(p + i * ETH_GSTRING_LEN, netvsc_stats[i].name, ETH_GSTRING_LEN); + + p += i * ETH_GSTRING_LEN; + for (i = 0; i < nvdev->num_chn; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + + break; + } +} + +static int +netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, + struct ethtool_rxnfc *info) +{ + info->data = RXH_IP_SRC | RXH_IP_DST; + + switch (info->flow_type) { 
+ case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: break; + default: + info->data = 0; + break; + } + + return 0; +} + +static int +netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *nvdev = ndc->nvdev; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = nvdev->num_chn; + return 0; + + case ETHTOOL_GRXFH: + return netvsc_get_rss_hash_opts(nvdev, info); } + return -EOPNOTSUPP; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1032,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net) } #endif +static u32 netvsc_get_rxfh_key_size(struct net_device *dev) +{ + return NETVSC_HASH_KEYLEN; +} + +static u32 netvsc_rss_indir_size(struct net_device *dev) +{ + return ITAB_NUM; +} + +static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + indir[i] = rndis_dev->ind_table[i]; + } + + if (key) + memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); + + return 0; +} + +static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev = ndc->nvdev; + struct rndis_device *rndis_dev = ndev->extension; + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < ITAB_NUM; i++) + if (indir[i] >= dev->num_rx_queues) + return -EINVAL; + + for (i = 0; i < ITAB_NUM; i++) + rndis_dev->ind_table[i] = indir[i]; + } + + if (!key) { + if (!indir) + return 0; + + key = rndis_dev->rss_key; + } + + return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1043,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_settings = netvsc_get_settings, .set_settings = netvsc_set_settings, + .get_rxnfc = netvsc_get_rxnfc, + .get_rxfh_key_size = netvsc_get_rxfh_key_size, + .get_rxfh_indir_size = netvsc_rss_indir_size, + .get_rxfh = netvsc_get_rxfh, + .set_rxfh = netvsc_set_rxfh, }; static const struct net_device_ops device_ops = { @@ -1163,15 +1293,6 @@ out_unlock: rtnl_unlock(); } -static void netvsc_free_netdev(struct net_device *netdev) -{ - struct net_device_context *net_device_ctx = netdev_priv(netdev); - - free_percpu(net_device_ctx->tx_stats); - free_percpu(net_device_ctx->rx_stats); - free_netdev(netdev); -} - static struct net_device *get_netvsc_bymac(const u8 *mac) { struct net_device *dev; @@ -1308,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev) static int netvsc_unregister_vf(struct net_device *vf_netdev) { struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; ndev = get_netvsc_byref(vf_netdev); @@ -1316,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; netdev_info(ndev, "VF unregistering: %s\n", 
vf_netdev->name); @@ -1336,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev, int ret; net = alloc_etherdev_mq(sizeof(struct net_device_context), - num_online_cpus()); + VRSS_CHANNEL_MAX); if (!net) return -ENOMEM; @@ -1351,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev, netdev_dbg(net, "netvsc msg_enable: %d\n", net_device_ctx->msg_enable); - net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->tx_stats) { - free_netdev(net); - return -ENOMEM; - } - net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats); - if (!net_device_ctx->rx_stats) { - free_percpu(net_device_ctx->tx_stats); - free_netdev(net); - return -ENOMEM; - } - hv_set_drvdata(dev, net); net_device_ctx->start_remove = false; @@ -1374,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev, INIT_LIST_HEAD(&net_device_ctx->reconfig_events); net->netdev_ops = &device_ops; - - net->hw_features = NETVSC_HW_FEATURES; - net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; - net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); @@ -1387,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = max_num_vrss_chns; + device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT, + num_online_cpus()); ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - netvsc_free_netdev(net); + free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + /* hw_features computed in rndis_filter_device_add */ + net->features = net->hw_features | + NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + net->vlan_features = net->features; + nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; @@ -1412,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev); - netvsc_free_netdev(net); + rndis_filter_device_remove(dev, nvdev); + free_netdev(net); } return ret; @@ -1423,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev) { struct net_device *net; struct net_device_context *ndev_ctx; - struct netvsc_device *net_device; net = hv_get_drvdata(dev); @@ -1433,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev) } ndev_ctx = netdev_priv(net); - net_device = ndev_ctx->nvdev; /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels() * removing the device. 
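The comment closing the hunk above refers to the start_remove flag: once netvsc_remove() has begun, the MTU and channel-count paths return -ENODEV instead of touching the device (both checks appear earlier in this patch). A minimal, single-threaded model of that guard with stand-in types — the real driver serializes the flag externally, which is not shown here:

    #include <stdbool.h>
    #include <stdio.h>

    /* Condensed model of the start_remove guard: reconfiguration
     * bails out once teardown has begun.
     */
    struct dev_state {
            bool start_remove;
            bool have_device;
    };

    static int reconfigure(struct dev_state *s)
    {
            if (s->start_remove || !s->have_device)
                    return -1; /* -ENODEV in the driver */

            s->start_remove = true;   /* fence off other entry points */
            /* ... remove and re-add the device here ... */
            s->start_remove = false;
            return 0;
    }

    int main(void)
    {
            struct dev_state s = { .start_remove = false, .have_device = true };

            printf("reconfig: %d\n", reconfigure(&s)); /* 0 */
            s.start_remove = true;                     /* remove in flight */
            printf("reconfig: %d\n", reconfigure(&s)); /* -1 */
            return 0;
    }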
@@ -1454,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev) * Call to the vsc driver to let it know that the device is being * removed */ - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, ndev_ctx->nvdev); hv_set_drvdata(dev, NULL); - netvsc_free_netdev(net); + free_netdev(net); return 0; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8d90904e0e49..19356f56b7b1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -57,6 +57,14 @@ struct rndis_request { u8 request_ext[RNDIS_EXT_LEN]; }; +static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa +}; + static struct rndis_device *get_rndis_device(void) { struct rndis_device *device; @@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev, } static void dump_rndis_message(struct hv_device *hv_dev, - struct rndis_message *rndis_msg) + const struct rndis_message *rndis_msg) { struct net_device *netdev = hv_get_drvdata(hv_dev); @@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) return NULL; } -static int rndis_filter_receive_data(struct rndis_device *dev, - struct rndis_message *msg, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +static int rndis_filter_receive_data(struct net_device *ndev, + struct rndis_device *dev, + struct rndis_message *msg, + struct vmbus_channel *channel, + void *data, u32 data_buflen) { - struct rndis_packet *rndis_pkt; + struct rndis_packet *rndis_pkt = &msg->msg.pkt; + const struct ndis_tcp_ip_checksum_info *csum_info; + const struct ndis_pkt_8021q_info *vlan; u32 data_offset; - struct ndis_pkt_8021q_info *vlan; - struct ndis_tcp_ip_checksum_info *csum_info; - u16 vlan_tci = 0; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - - rndis_pkt = &msg->msg.pkt; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; - pkt->total_data_buflen -= data_offset; + data_buflen -= data_offset; /* * Make sure we got a valid RNDIS message, now total_data_buflen * should be the data packet size plus the trailer padding size */ - if (pkt->total_data_buflen < rndis_pkt->data_len) { + if (unlikely(data_buflen < rndis_pkt->data_len)) { netdev_err(dev->ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", - pkt->total_data_buflen, rndis_pkt->data_len); + data_buflen, rndis_pkt->data_len); return NVSP_STAT_FAIL; } + vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); + /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ - pkt->total_data_buflen = rndis_pkt->data_len; - *data = (void *)((unsigned long)(*data) + data_offset); - - vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); - if (vlan) { - vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid | - (vlan->pri << VLAN_PRIO_SHIFT); - } - + data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data, - csum_info, channel, vlan_tci); + return netvsc_recv_callback(ndev, 
channel, + data, rndis_pkt->data_len, + csum_info, vlan); } -int rndis_filter_receive(struct hv_device *dev, - struct hv_netvsc_packet *pkt, - void **data, - struct vmbus_channel *channel) +int rndis_filter_receive(struct net_device *ndev, + struct netvsc_device *net_dev, + struct hv_device *dev, + struct vmbus_channel *channel, + void *data, u32 buflen) { - struct net_device *ndev = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_dev = net_device_ctx->nvdev; - struct rndis_device *rndis_dev; - struct rndis_message *rndis_msg; - int ret = 0; - - if (!net_dev) { - ret = NVSP_STAT_FAIL; - goto exit; - } + struct rndis_device *rndis_dev = net_dev->extension; + struct rndis_message *rndis_msg = data; /* Make sure the rndis device state is initialized */ - if (!net_dev->extension) { - netdev_err(ndev, "got rndis message but no rndis device - " - "dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(!rndis_dev)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message but no rndis device!\n"); + return NVSP_STAT_FAIL; } - rndis_dev = (struct rndis_device *)net_dev->extension; - if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { - netdev_err(ndev, "got rndis message but rndis device " - "uninitialized...dropping this message!\n"); - ret = NVSP_STAT_FAIL; - goto exit; + if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { + netif_err(net_device_ctx, rx_err, ndev, + "got rndis message uninitialized\n"); + return NVSP_STAT_FAIL; } - rndis_msg = *data; - - if (netif_msg_rx_err(net_device_ctx)) + if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - /* data msg */ - ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt, - data, channel); - break; - + return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: @@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev, break; } -exit: - return ret; + return 0; } static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, @@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->info_buflen = 0; query->dev_vc_handle = 0; - if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { + if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { + struct net_device_context *ndevctx = netdev_priv(dev->ndev); + struct netvsc_device *nvdev = ndevctx->nvdev; + struct ndis_offload *hwcaps; + u32 nvsp_version = nvdev->nvsp_version; + u8 ndis_rev; + size_t size; + + if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3; + size = NDIS_OFFLOAD_SIZE; + } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2; + size = NDIS_OFFLOAD_SIZE_6_1; + } else { + ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1; + size = NDIS_OFFLOAD_SIZE_6_0; + } + + request->request_msg.msg_len += size; + query->info_buflen = size; + hwcaps = (struct ndis_offload *) + ((unsigned long)query + query->info_buf_offset); + + hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD; + hwcaps->header.revision = ndis_rev; + hwcaps->header.size = size; + + } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { struct ndis_recv_scale_cap *cap; request->request_msg.msg_len += @@ -526,6 +537,44 @@ cleanup: return ret; } +/* Get the hardware offload capabilities */ +static int +rndis_query_hwcaps(struct 
rndis_device *dev, struct ndis_offload *caps) +{ + u32 caps_len = sizeof(*caps); + int ret; + + memset(caps, 0, sizeof(*caps)); + + ret = rndis_filter_query_device(dev, + OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, + caps, &caps_len); + if (ret) + return ret; + + if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) { + netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n", + caps->header.type); + return -EINVAL; + } + + if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) { + netdev_warn(dev->ndev, "invalid NDIS objrev %x\n", + caps->header.revision); + return -EINVAL; + } + + if (caps->header.size > caps_len || + caps->header.size < NDIS_OFFLOAD_SIZE_6_0) { + netdev_warn(dev->ndev, + "invalid NDIS objsize %u, data size %u\n", + caps->header.size, caps_len); + return -EINVAL; + } + + return 0; +} + static int rndis_filter_query_device_mac(struct rndis_device *dev) { u32 size = ETH_ALEN; @@ -663,23 +712,15 @@ cleanup: return ret; } -static const u8 netvsc_hash_key[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa -}; -#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key) - -static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key, int num_queue) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_recv_scale_param) + - 4*ITAB_NUM + HASH_KEYLEN; + 4 * ITAB_NUM + NETVSC_HASH_KEYLEN; struct ndis_recv_scale_param *rssp; u32 *itab; u8 *keyp; @@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); - rssp->hashkey_size = HASH_KEYLEN; + rssp->hashkey_size = NETVSC_HASH_KEYLEN; rssp->kashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = i % num_queue; + itab[i] = rdev->ind_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); - for (i = 0; i < HASH_KEYLEN; i++) - keyp[i] = netvsc_hash_key[i]; + memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); if (ret != 0) @@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { + if (set_complete->status == RNDIS_STATUS_SUCCESS) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; - u32 status; int ret; request = get_rndis_request(dev, RNDIS_MSG_SET, @@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - status = set_complete->status; - cleanup: if 
(request) put_rndis_request(dev, request); @@ -864,6 +903,23 @@ cleanup: return ret; } +static bool netvsc_device_idle(const struct netvsc_device *nvdev) +{ + int i; + + if (atomic_read(&nvdev->num_outstanding_recvs) > 0) + return false; + + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + + if (atomic_read(&nvchan->queue_sends) > 0) + return false; + } + + return true; +} + static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; @@ -894,9 +950,7 @@ cleanup: spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); /* Wait for all send completions */ - wait_event(nvdev->wait_drain, - atomic_read(&nvdev->num_outstanding_sends) == 0 && - atomic_read(&nvdev->num_outstanding_recvs) == 0); + wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); if (request) put_rndis_request(dev, request); @@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (chn_index >= nvscdev->num_chn) return; - set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) * - NETVSC_PACKET_SIZE); - - nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * - sizeof(struct recv_comp_data)); + nvscdev->chan_table[chn_index].mrc.buf + = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); if (ret == 0) - nvscdev->chn_table[chn_index] = new_sc; + nvscdev->chan_table[chn_index].channel = new_sc; spin_lock_irqsave(&nvscdev->sc_lock, flags); nvscdev->num_sc_offered--; @@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) } int rndis_filter_device_add(struct hv_device *dev, - void *additional_info) + struct netvsc_device_info *device_info) { - int ret; struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; - struct netvsc_device_info *device_info = additional_info; + struct ndis_offload hwcaps; struct ndis_offload_params offloads; struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + unsigned int gso_max_size = GSO_MAX_SIZE; u32 mtu, size; u32 num_rss_qs; u32 sc_delta; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; unsigned long flags; + int i, ret; rndis_device = get_rndis_device(); if (!rndis_device) @@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev, * NOTE! Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, additional_info); + ret = netvsc_device_add(dev, device_info); if (ret != 0) { kfree(rndis_device); return ret; @@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev, /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } @@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev, /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); if (ret != 0) { - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); - /* Turn on the offloads; the host supports all of the relevant - * offloads. 
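/*
 * netvsc_device_idle() above turns "wait for all I/O to drain" into a
 * predicate over per-channel counters, consumed by wait_event(). The
 * idiom in isolation (struct chan, nchan and drain_wq are assumed
 * names, not part of the patch):
 */
#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(drain_wq);

struct chan { atomic_t inflight; };

static bool chans_idle(const struct chan *chan, int nchan)
{
	int i;

	for (i = 0; i < nchan; i++)
		if (atomic_read(&chan[i].inflight) > 0)
			return false;
	return true;
}

static void drain(struct chan *chan, int nchan)
{
	/* Completion handlers do atomic_dec() then wake_up(&drain_wq). */
	wait_event(drain_wq, chans_idle(chan, nchan));
}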
- */ + /* Find HW offload capabilities */ + ret = rndis_query_hwcaps(rndis_device, &hwcaps); + if (ret != 0) { + rndis_filter_device_remove(dev, net_device); + return ret; + } + + /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); - /* A value of zero means "no change"; now turn on what we - * want. - */ - offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; - offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + + /* Linux does not care about IP checksum, always does in kernel */ + offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; + + /* Compute tx offload settings based on hw capabilities */ + net->hw_features = NETIF_F_RXCSUM; + + if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { + /* Can checksum TCP */ + net->hw_features |= NETIF_F_IP_CSUM; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP; + + offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + + if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) { + offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO; + + if (hwcaps.lsov2.ip4_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip4_maxsz; + } + + if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) { + offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP; + } + } + + if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) { + net->hw_features |= NETIF_F_IPV6_CSUM; + + offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP; + + if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) && + (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) { + offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; + net->hw_features |= NETIF_F_TSO6; + + if (hwcaps.lsov2.ip6_maxsz < gso_max_size) + gso_max_size = hwcaps.lsov2.ip6_maxsz; + } + + if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) { + offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; + net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP; + } + } + + netif_set_gso_max_size(net, gso_max_size); ret = rndis_filter_set_offload_params(net, &offloads); if (ret) @@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); num_rss_qs = net_device->num_chn - 1; + + for (i = 0; i < ITAB_NUM; i++) + rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, + net_device->num_chn); + net_device->num_sc_offered = num_rss_qs; if (net_device->num_chn == 1) goto out; - net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) * - NETVSC_PACKET_SIZE); - if (!net_device->sub_cb_buf) { - net_device->num_chn = 1; - dev_info(&dev->device, "No memory for subchannels.\n"); - goto out; - } - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); init_packet = &net_device->channel_init_pkt; @@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; - ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); + ret 
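/*
 * The hunk above derives netdev features from the queried capabilities
 * rather than assuming host support, and lowers the GSO ceiling to the
 * smallest LSO size the host advertises. The clamping step in
 * isolation (ip4_maxsz/ip6_maxsz stand in for the hwcaps fields):
 */
#include <linux/netdevice.h>

static void apply_gso_limit(struct net_device *net,
			    u32 ip4_maxsz, u32 ip6_maxsz)
{
	unsigned int gso_max = GSO_MAX_SIZE;	/* the stack's default ceiling */

	if (ip4_maxsz < gso_max)
		gso_max = ip4_maxsz;
	if (ip6_maxsz < gso_max)
		gso_max = ip6_maxsz;

	/* The stack will not hand this device larger GSO skbs. */
	netif_set_gso_max_size(net, gso_max);
}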
= rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, + net_device->num_chn); /* * Set the number of sub-channels to be received. @@ -1152,13 +1248,13 @@ out: return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: - rndis_filter_device_remove(dev); + rndis_filter_device_remove(dev, net_device); return ret; } -void rndis_filter_device_remove(struct hv_device *dev) +void rndis_filter_device_remove(struct hv_device *dev, + struct netvsc_device *net_dev) { - struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev); struct rndis_device *rndis_dev = net_dev->extension; /* If not all subchannel offers are complete, wait for them until diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 66c0eeafcb5d..312fce7302d3 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -78,10 +78,8 @@ static void ifb_ri_tasklet(unsigned long _txp) } while ((skb = __skb_dequeue(&txp->tq)) != NULL) { - u32 from = G_TC_FROM(skb->tc_verd); - - skb->tc_verd = 0; - skb->tc_verd = SET_TC_NCLS(skb->tc_verd); + skb->tc_redirected = 0; + skb->tc_skip_classify = 1; u64_stats_update_begin(&txp->tsync); txp->tx_packets++; @@ -101,13 +99,12 @@ static void ifb_ri_tasklet(unsigned long _txp) rcu_read_unlock(); skb->skb_iif = txp->dev->ifindex; - if (from & AT_EGRESS) { + if (!skb->tc_from_ingress) { dev_queue_xmit(skb); - } else if (from & AT_INGRESS) { + } else { skb_pull(skb, skb->mac_len); netif_receive_skb(skb); - } else - BUG(); + } } if (__netif_tx_trylock(txq)) { @@ -129,8 +126,8 @@ resched: } -static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void ifb_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private; @@ -157,8 +154,6 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, } stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; - - return stats; } static int ifb_dev_init(struct net_device *dev) @@ -241,7 +236,6 @@ static void ifb_setup(struct net_device *dev) static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_dev_private *dp = netdev_priv(dev); - u32 from = G_TC_FROM(skb->tc_verd); struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); u64_stats_update_begin(&txp->rsync); @@ -249,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) txp->rx_bytes += skb->len; u64_stats_update_end(&txp->rsync); - if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { + if (!skb->tc_redirected || !skb->skb_iif) { dev_kfree_skb(skb); dev->stats.rx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index dbfbb33ac66c..406ae4ff0ae8 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -94,9 +94,11 @@ struct ipvl_port { struct hlist_head hlhead[IPVLAN_HASH_SIZE]; struct list_head ipvlans; u16 mode; + u16 dev_id_start; struct work_struct wq; struct sk_buff_head backlog; int count; + struct ida ida; }; struct ipvl_skb_cb { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 83ce74acf82d..8ae335d73d38 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -19,9 +19,6 @@ void ipvlan_init_secret(void) static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast) { - if (!ipvlan) - return; - if (likely(success)) { struct 
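/*
 * The ifb rewrite above replaces G_TC_FROM()/AT_* mask arithmetic on
 * tc_verd with dedicated sk_buff bits: tc_redirected (packet arrived
 * via act_mirred), tc_from_ingress (which direction it was taken
 * from) and tc_skip_classify (don't classify it a second time).
 * Reinjection then reads as plain field tests; a sketch assuming
 * CONFIG_NET_CLS_ACT and an skb already known to be tc_redirected:
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void reinject(struct sk_buff *skb)
{
	skb->tc_skip_classify = 1;	/* skip tc when it re-enters */

	if (!skb->tc_from_ingress) {
		dev_queue_xmit(skb);		/* taken at egress: send it out */
	} else {
		skb_pull(skb, skb->mac_len);	/* taken at ingress: re-receive */
		netif_receive_skb(skb);
	}
}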
ipvl_pcpu_stats *pcptr; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 8b0f99300cbc..95b18f4602cf 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev) return -EINVAL; } - if (netif_is_macvlan_port(dev)) { - netdev_err(dev, "Master is a macvlan port.\n"); + if (netdev_is_rx_handler_busy(dev)) { + netdev_err(dev, "Device is already in use.\n"); return -EBUSY; } @@ -119,6 +119,8 @@ static int ipvlan_port_create(struct net_device *dev) skb_queue_head_init(&port->backlog); INIT_WORK(&port->wq, ipvlan_process_multicast); + ida_init(&port->ida); + port->dev_id_start = 1; err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port); if (err) @@ -150,6 +152,7 @@ static void ipvlan_port_destroy(struct net_device *dev) dev_put(skb->dev); kfree_skb(skb); } + ida_destroy(&port->ida); kfree(port); } @@ -301,8 +304,8 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev) dev_mc_sync(ipvlan->phy_dev, dev); } -static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *s) +static void ipvlan_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) { struct ipvl_dev *ipvlan = netdev_priv(dev); @@ -339,7 +342,6 @@ static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev, s->rx_dropped = rx_errs; s->tx_dropped = tx_drps; } - return s; } static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) @@ -533,6 +535,29 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); + /* If the port-id base is at the MAX value, then wrap it around and + * begin from 0x1 again. This may be due to a busy system where lots + * of slaves are getting created and deleted. + */ + if (port->dev_id_start == 0xFFFE) + port->dev_id_start = 0x1; + + /* Since L2 address is shared among all IPvlan slaves including + * master, use unique 16 bit dev-ids to differentiate among them. + * Assign IDs between 0x1 and 0xFFFE (used by the master) to each + * slave link [see addrconf_ifid_eui48()]. + */ + err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE, + GFP_KERNEL); + if (err < 0) + err = ida_simple_get(&port->ida, 0x1, port->dev_id_start, + GFP_KERNEL); + if (err < 0) + goto destroy_ipvlan_port; + dev->dev_id = err; + /* Increment id-base to the next slot for the future assignment */ + port->dev_id_start = err + 1; + /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing * packets. 
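/*
 * The ipvlan hunks above allocate 16-bit dev_ids from an IDA, probing
 * [dev_id_start, 0xFFFE) first and falling back to [1, dev_id_start)
 * after a wrap. The allocator pattern on its own (callers are assumed
 * serialized, as ipvlan's are under rtnl; names are illustrative):
 */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(dev_ids);
static int next_hint = 1;

static int alloc_dev_id(void)
{
	int id;

	if (next_hint == 0xFFFE)	/* hint hit the top: wrap it */
		next_hint = 1;

	/* ida_simple_get() returns the lowest free id in [start, end). */
	id = ida_simple_get(&dev_ids, next_hint, 0xFFFE, GFP_KERNEL);
	if (id < 0)			/* upper range full: reuse holes below */
		id = ida_simple_get(&dev_ids, 1, next_hint, GFP_KERNEL);
	if (id >= 0)
		next_hint = id + 1;

	return id;	/* release with ida_simple_remove(&dev_ids, id) */
}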
@@ -543,7 +568,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if (err < 0) - goto destroy_ipvlan_port; + goto remove_ida; err = netdev_upper_dev_link(phy_dev, dev); if (err) { @@ -562,6 +587,8 @@ unlink_netdev: netdev_upper_dev_unlink(phy_dev, dev); unregister_netdev: unregister_netdevice(dev); +remove_ida: + ida_simple_remove(&port->ida, dev->dev_id); destroy_ipvlan_port: if (create) ipvlan_port_destroy(phy_dev); @@ -579,6 +606,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) kfree_rcu(addr, rcu); } + ida_simple_remove(&ipvlan->port->ida, dev->dev_id); list_del_rcu(&ipvlan->pnode); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); @@ -674,23 +702,22 @@ static int ipvlan_device_event(struct notifier_block *unused, return NOTIFY_DONE; } -static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; - if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { - netif_err(ipvlan, ifup, ipvlan->dev, - "Failed to add IPv6=%pI6c addr for %s intf\n", - ip6_addr, ipvlan->dev->name); - return -EINVAL; - } addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC); if (!addr) return -ENOMEM; addr->master = ipvlan; - memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); - addr->atype = IPVL_IPV6; + if (is_v6) { + memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr)); + addr->atype = IPVL_IPV6; + } else { + memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr)); + addr->atype = IPVL_IPV4; + } list_add_tail(&addr->anode, &ipvlan->addrs); /* If the interface is not up, the address will be added to the hash @@ -702,11 +729,11 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) return 0; } -static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) { struct ipvl_addr *addr; - addr = ipvlan_find_addr(ipvlan, ip6_addr, true); + addr = ipvlan_find_addr(ipvlan, iaddr, is_v6); if (!addr) return; @@ -717,6 +744,23 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) return; } +static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { + netif_err(ipvlan, ifup, ipvlan->dev, + "Failed to add IPv6=%pI6c addr for %s intf\n", + ip6_addr, ipvlan->dev->name); + return -EINVAL; + } + + return ipvlan_add_addr(ipvlan, ip6_addr, true); +} + +static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) +{ + return ipvlan_del_addr(ipvlan, ip6_addr, true); +} + static int ipvlan_addr6_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -750,45 +794,19 @@ static int ipvlan_addr6_event(struct notifier_block *unused, static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { - struct ipvl_addr *addr; - if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { netif_err(ipvlan, ifup, ipvlan->dev, "Failed to add IPv4=%pI4 on %s intf.\n", ip4_addr, ipvlan->dev->name); return -EINVAL; } - addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL); - if (!addr) - return -ENOMEM; - - addr->master = ipvlan; - memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); - addr->atype = IPVL_IPV4; - list_add_tail(&addr->anode, &ipvlan->addrs); - - /* If the interface is not up, the address will be added 
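/*
 * The ipvlan_link_new() hunk above also threads a new remove_ida label
 * into the error path. The convention it follows: unwind labels sit in
 * reverse order of the setup steps, so a failure at step N falls
 * through the teardown of steps N-1..1 and nothing leaks. Skeleton
 * form (setup_a/b/c and teardown_a/b are placeholders):
 */
static int setup_a(void);
static int setup_b(void);
static int setup_c(void);
static void teardown_a(void);
static void teardown_b(void);

static int setup_all(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;	/* nothing to unwind yet */

	err = setup_b();
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return err;
}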
to the hash - * list by ipvlan_open. - */ - if (netif_running(ipvlan->dev)) - ipvlan_ht_addr_add(ipvlan, addr); - return 0; + return ipvlan_add_addr(ipvlan, ip4_addr, false); } static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { - struct ipvl_addr *addr; - - addr = ipvlan_find_addr(ipvlan, ip4_addr, false); - if (!addr) - return; - - ipvlan_ht_addr_del(addr); - list_del(&addr->anode); - kfree_rcu(addr, rcu); - - return; + return ipvlan_del_addr(ipvlan, ip4_addr, false); } static int ipvlan_addr4_event(struct notifier_block *unused, diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index be5bb0b7f29c..3151b580dbd6 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -22,7 +22,7 @@ static int max_rate = 57600; static int max_rate = 115200; #endif -static void turnaround_delay(unsigned long last_jif, int mtt) +static void turnaround_delay(int mtt) { long ticks; @@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev) UART_CLEAR_LSR(port); ch = UART_GET_CHAR(port); async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); - dev->last_rx = jiffies; } static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id) @@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work) int tx_cnt = 10; while (bfin_sir_is_receiving(dev) && --tx_cnt) - turnaround_delay(dev->last_rx, self->mtt); + turnaround_delay(self->mtt); bfin_sir_stop_rx(port); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index e3fe9a286136..fede6864c737 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self) async_unwrap_char(self->ndev, &self->ndev->stats, &self->rx_buff, (u8)data); - self->ndev->last_rx = jiffies; if (EOFD & sh_sir_read(self, IRIF_SIR_FRM)) continue; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 1e05b7c2d157..30a493936e63 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void loopback_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { u64 bytes = 0; u64 packets = 0; @@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, stats->tx_packets = packets; stats->rx_bytes = bytes; stats->tx_bytes = bytes; - return stats; } static u32 always_on(struct net_device *dev) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index f83cf6696820..778a77303c49 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2888,13 +2888,13 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *s) +static void macsec_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) { int cpu; if (!dev->tstats) - return s; + return; for_each_possible_cpu(cpu) { struct pcpu_sw_netstats *stats; @@ -2918,8 +2918,6 @@ static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, s->rx_dropped = dev->stats.rx_dropped; s->tx_dropped = dev->stats.tx_dropped; - - return s; } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 20b3fdf282c5..cbfc1be23a0e 100644 --- a/drivers/net/macvlan.c +++ 
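/*
 * Several hunks in this series (loopback, macsec, macvlan, ipvlan, ppp,
 * slip, team, tun, veth, nlmon) drop the return value from
 * ndo_get_stats64: the caller passes the buffer in, so returning it
 * back added nothing. A sketch of the new void shape over a made-up
 * per-cpu counter pair (struct pcpu_cnt and demo_priv are not from the
 * patch):
 */
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct pcpu_cnt {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

struct demo_priv {
	struct pcpu_cnt __percpu *cnt;
};

static void demo_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct demo_priv *priv = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_cnt *c = per_cpu_ptr(priv->cnt, cpu);
		unsigned int start;
		u64 packets, bytes;

		do {	/* retry if a writer raced with this snapshot */
			start = u64_stats_fetch_begin_irq(&c->syncp);
			packets = c->packets;
			bytes = c->bytes;
		} while (u64_stats_fetch_retry_irq(&c->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	/* nothing to return; the caller owns *stats */
}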
b/drivers/net/macvlan.c @@ -855,8 +855,8 @@ static void macvlan_uninit(struct net_device *dev) macvlan_port_destroy(port->dev); } -static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void macvlan_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -893,7 +893,6 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } - return stats; } static int macvlan_vlan_rx_add_vid(struct net_device *dev, @@ -1111,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev) if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; - if (netif_is_ipvlan_port(dev)) + if (netdev_is_rx_handler_busy(dev)) return -EBUSY; port = kzalloc(sizeof(*port), GFP_KERNEL); diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index 3e027ed0b3bb..077364cbf439 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -342,6 +342,184 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, EXPORT_SYMBOL(mdio45_ethtool_gset_npage); /** + * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS + * @mdio: MDIO interface + * @cmd: Ethtool request structure + * @npage_adv: Modes currently advertised on next pages + * @npage_lpa: Modes advertised by link partner on next pages + * + * The @cmd parameter is expected to have been cleared before calling + * mdio45_ethtool_ksettings_get_npage(). + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. The + * caller must pass them in. + */ +void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd, + u32 npage_adv, u32 npage_lpa) +{ + int reg; + u32 speed, supported = 0, advertising = 0, lp_advertising = 0; + + BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22); + BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45); + + cmd->base.phy_address = mdio->prtad; + cmd->base.mdio_support = + mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22); + + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_CTRL2); + switch (reg & MDIO_PMA_CTRL2_TYPE) { + case MDIO_PMA_CTRL2_10GBT: + case MDIO_PMA_CTRL2_1000BT: + case MDIO_PMA_CTRL2_100BTX: + case MDIO_PMA_CTRL2_10BT: + cmd->base.port = PORT_TP; + supported = SUPPORTED_TP; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_SPEED); + if (reg & MDIO_SPEED_10G) + supported |= SUPPORTED_10000baseT_Full; + if (reg & MDIO_PMA_SPEED_1000) + supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half); + if (reg & MDIO_PMA_SPEED_100) + supported |= (SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half); + if (reg & MDIO_PMA_SPEED_10) + supported |= (SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half); + advertising = ADVERTISED_TP; + break; + + case MDIO_PMA_CTRL2_10GBCX4: + cmd->base.port = PORT_OTHER; + supported = 0; + advertising = 0; + break; + + case MDIO_PMA_CTRL2_10GBKX4: + case MDIO_PMA_CTRL2_10GBKR: + case MDIO_PMA_CTRL2_1000BKX: + cmd->base.port = PORT_OTHER; + supported = SUPPORTED_Backplane; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_EXTABLE); + if (reg & MDIO_PMA_EXTABLE_10GBKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (reg & MDIO_PMA_EXTABLE_10GBKR) + supported |= SUPPORTED_10000baseKR_Full; + if (reg & 
MDIO_PMA_EXTABLE_1000BKX) + supported |= SUPPORTED_1000baseKX_Full; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBR_FECABLE); + if (reg & MDIO_PMA_10GBR_FECABLE_ABLE) + supported |= SUPPORTED_10000baseR_FEC; + advertising = ADVERTISED_Backplane; + break; + + /* All the other defined modes are flavours of optical */ + default: + cmd->base.port = PORT_FIBRE; + supported = SUPPORTED_FIBRE; + advertising = ADVERTISED_FIBRE; + break; + } + + if (mdio->mmds & MDIO_DEVS_AN) { + supported |= SUPPORTED_Autoneg; + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, + MDIO_CTRL1); + if (reg & MDIO_AN_CTRL1_ENABLE) { + cmd->base.autoneg = AUTONEG_ENABLE; + advertising |= + ADVERTISED_Autoneg | + mdio45_get_an(mdio, MDIO_AN_ADVERTISE) | + npage_adv; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (cmd->base.autoneg) { + u32 modes = 0; + int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad, + MDIO_MMD_AN, MDIO_STAT1); + + /* If AN is complete and successful, report best common + * mode, otherwise report best advertised mode. + */ + if (an_stat & MDIO_AN_STAT1_COMPLETE) { + lp_advertising = + mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa; + if (an_stat & MDIO_AN_STAT1_LPABLE) + lp_advertising |= ADVERTISED_Autoneg; + modes = advertising & lp_advertising; + } + if ((modes & ~ADVERTISED_Autoneg) == 0) + modes = advertising; + + if (modes & (ADVERTISED_10000baseT_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_10000baseKR_Full)) { + speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + } else if (modes & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseKX_Full)) { + speed = SPEED_1000; + cmd->base.duplex = !(modes & ADVERTISED_1000baseT_Half); + } else if (modes & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + speed = SPEED_100; + cmd->base.duplex = !!(modes & ADVERTISED_100baseT_Full); + } else { + speed = SPEED_10; + cmd->base.duplex = !!(modes & ADVERTISED_10baseT_Full); + } + } else { + /* Report forced settings */ + reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_CTRL1); + speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) + * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); + cmd->base.duplex = (reg & MDIO_CTRL1_FULLDPLX || + speed == SPEED_10000); + } + + cmd->base.speed = speed; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + /* 10GBASE-T MDI/MDI-X */ + if (cmd->base.port == PORT_TP && (cmd->base.speed == SPEED_10000)) { + switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBT_SWAPPOL)) { + case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: + cmd->base.eth_tp_mdix = ETH_TP_MDI; + break; + case 0: + cmd->base.eth_tp_mdix = ETH_TP_MDI_X; + break; + default: + /* It's complicated... 
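/*
 * mdio45_ethtool_ksettings_get_npage() above still builds SUPPORTED_* /
 * ADVERTISED_* u32 masks internally and converts them to the new
 * ETHTOOL_GLINKSETTINGS link-mode bitmaps at the end; that helper pair
 * is the standard migration path. Minimal usage sketch:
 */
#include <linux/ethtool.h>

static void fill_modes(struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg;
	u32 advertising = ADVERTISED_TP | ADVERTISED_1000baseT_Full |
			  ADVERTISED_Autoneg;

	/* Legacy u32 -> link-mode bitmap, one call per mask. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	/* The reverse, ethtool_convert_link_mode_to_legacy_u32(), also
	 * reports whether the bitmap fit in 32 bits. */
}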
*/ + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + break; + } + } +} +EXPORT_SYMBOL(mdio45_ethtool_ksettings_get_npage); + +/** * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs * @mdio: MDIO interface * @mii_data: MII ioctl data structure diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 2de7faee9b19..b91603835d26 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -58,7 +58,7 @@ static int nlmon_close(struct net_device *dev) return netlink_remove_tap(&nlmon->nt); } -static struct rtnl_link_stats64 * +static void nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { int i; @@ -86,8 +86,6 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_bytes = bytes; stats->tx_bytes = 0; - - return stats; } static u32 always_on(struct net_device *dev) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 264b085d796b..aa01020ab1b9 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev) +{ + /* +1 RC_CAL codes for RL centering for both LT and HT conditions */ + bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003); + + /* Cut master bias current by 2% to compensate for RC_CAL offset */ + bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b); + + /* Improve hybrid leakage */ + bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3); + + /* Change rx_on_tune 8 to 0xf */ + bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6); + + /* Change 100Tx EEE bandwidth */ + bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d); + + /* Enable ffe zero detection for Vitesse interoperability */ + bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015); + + r_rc_cal_reset(phydev); + + return 0; +} + static int bcm7xxx_28nm_config_init(struct phy_device *phydev) { u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags); @@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) u8 count; int ret = 0; + /* Newer devices have moved the revision information back into a + * standard location in MII_PHYS_ID[23] + */ + if (rev == 0) + rev = phydev->phy_id & ~phydev->drv->phy_id_mask; + pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", phydev_name(phydev), phydev->drv->name, rev, patch); @@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) case 0x10: ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev); break; + case 0x01: + ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev); + break; default: break; } @@ -416,6 +450,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), @@ -430,6 +465,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7278, 0xfffffff0, }, { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, { PHY_ID_BCM7346, 0xfffffff0, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 4223e35490b0..9cd8b27d1292 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -30,6 +30,50 @@ 
MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); +static int bcm54210e_config_init(struct phy_device *phydev) +{ + int val; + + val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); + val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + val |= MII_BCM54XX_AUXCTL_MISC_WREN; + bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val); + + val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL); + val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; + bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val); + + return 0; +} + +static int bcm54612e_config_init(struct phy_device *phydev) +{ + /* Clear TX internal delay unless requested. */ + if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && + (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { + /* Disable TXD to GTXCLK clock delay (default set) */ + /* Bit 9 is the only field in shadow register 00011 */ + bcm_phy_write_shadow(phydev, 0x03, 0); + } + + /* Clear RX internal delay unless requested. */ + if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && + (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { + u16 reg; + + reg = bcm54xx_auxctl_read(phydev, + MII_BCM54XX_AUXCTL_SHDWSEL_MISC); + /* Disable RXD to RXC delay (default set) */ + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + /* Clear shadow selector field */ + reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; + bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, + MII_BCM54XX_AUXCTL_MISC_WREN | reg); + } + + return 0; +} + static int bcm54810_config(struct phy_device *phydev) { int rc, val; @@ -230,7 +274,15 @@ static int bcm54xx_config_init(struct phy_device *phydev) (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) bcm54xx_adjust_rxrefclk(phydev); - if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { + if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) { + err = bcm54210e_config_init(phydev); + if (err) + return err; + } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) { + err = bcm54612e_config_init(phydev); + if (err) + return err; + } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { err = bcm54810_config(phydev); if (err) return err; @@ -375,41 +427,6 @@ static int bcm5481_config_aneg(struct phy_device *phydev) return ret; } -static int bcm54612e_config_aneg(struct phy_device *phydev) -{ - int ret; - - /* First, auto-negotiate. */ - ret = genphy_config_aneg(phydev); - - /* Clear TX internal delay unless requested. */ - if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { - /* Disable TXD to GTXCLK clock delay (default set) */ - /* Bit 9 is the only field in shadow register 00011 */ - bcm_phy_write_shadow(phydev, 0x03, 0); - } - - /* Clear RX internal delay unless requested. 
*/ - if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { - u16 reg; - - /* Errata: reads require filling in the write selector field */ - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, - MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC); - reg = phy_read(phydev, MII_BCM54XX_AUX_CTL); - /* Disable RXD to RXC delay (default set) */ - reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW; - /* Clear shadow selector field */ - reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK; - bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, - MII_BCM54XX_AUXCTL_MISC_WREN | reg); - } - - return ret; -} - static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) { int val; @@ -544,6 +561,17 @@ static struct phy_driver broadcom_drivers[] = { .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { + .phy_id = PHY_ID_BCM54210E, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM54210E", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = bcm54xx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = bcm_phy_ack_intr, + .config_intr = bcm_phy_config_intr, +}, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", @@ -561,7 +589,7 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = bcm54612e_config_aneg, + .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -682,6 +710,7 @@ module_phy_driver(broadcom_drivers); static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5411, 0xfffffff0 }, { PHY_ID_BCM5421, 0xfffffff0 }, + { PHY_ID_BCM54210E, 0xfffffff0 }, { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM54612E, 0xfffffff0 }, { PHY_ID_BCM54616S, 0xfffffff0 }, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index ed0d235cf850..f9d0fa315a47 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -17,8 +17,10 @@ */ #include <linux/kernel.h> #include <linux/string.h> +#include <linux/ctype.h> #include <linux/errno.h> #include <linux/unistd.h> +#include <linux/hwmon.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -90,6 +92,17 @@ #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) #define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_MISC_TEST 0x1a +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 +#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT 8 +#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN BIT(7) +#define MII_88E1510_MISC_TEST_TEMP_IRQ BIT(6) +#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN BIT(5) +#define MII_88E1121_MISC_TEST_TEMP_MASK 0x1f + +#define MII_88E1510_TEMP_SENSOR 0x1b +#define MII_88E1510_TEMP_SENSOR_MASK 0xff + #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = { struct marvell_priv { u64 stats[ARRAY_SIZE(marvell_hw_stats)]; + char *hwmon_name; + struct device *hwmon_dev; }; static int marvell_ack_interrupt(struct phy_device *phydev) @@ -1468,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } +#ifdef CONFIG_HWMON +static int m88e1121_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + int 
val; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = phy_read(phydev, MII_88E1121_MISC_TEST); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + if (ret < 0) + goto error; + + *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1121_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1121_get_temp(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static umode_t m88e1121_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static u32 m88e1121_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_chip = { + .type = hwmon_chip, + .config = m88e1121_hwmon_chip_config, +}; + +static u32 m88e1121_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info m88e1121_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1121_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1121_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1121_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = { + .is_visible = m88e1121_hwmon_is_visible, + .read = m88e1121_hwmon_read, +}; + +static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { + .ops = &m88e1121_hwmon_hwmon_ops, + .info = m88e1121_hwmon_info, +}; + +static int m88e1510_get_temp(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR); + if (ret < 0) + goto error; + + *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp) +{ + int ret; + + *temp = 0; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >> + MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25; + /* convert to mC */ + *temp *= 1000; + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_set_temp_critical(struct phy_device *phydev, long temp) +{ + int ret; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 
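/*
 * The Marvell hwmon helpers above all share one shape: take the phydev
 * lock, switch to the extended register page, do the accesses, and
 * restore page 0 even on failure. Distilled, with REG_PAGE_SEL and
 * REG_TEMP standing in for the Marvell registers:
 */
#include <linux/phy.h>

#define REG_PAGE_SEL	0x16	/* hypothetical page-select register */
#define REG_TEMP	0x1b	/* hypothetical sensor register */

static int read_paged_reg(struct phy_device *phydev, int page, int *val)
{
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_write(phydev, REG_PAGE_SEL, page);
	if (ret < 0)
		goto restore;

	ret = phy_read(phydev, REG_TEMP);
	if (ret >= 0) {
		*val = ret;
		ret = 0;
	}

restore:
	/* Always park the PHY back on page 0, even after an error. */
	phy_write(phydev, REG_PAGE_SEL, 0x0);
	mutex_unlock(&phydev->lock);

	return ret;
}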
0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + + temp = temp / 1000; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + ret = phy_write(phydev, MII_88E1121_MISC_TEST, + (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) | + (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT)); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm) +{ + int ret; + + *alarm = false; + + mutex_lock(&phydev->lock); + + ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6); + if (ret < 0) + goto error; + + ret = phy_read(phydev, MII_88E1121_MISC_TEST); + if (ret < 0) + goto error; + *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); + +error: + phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0); + mutex_unlock(&phydev->lock); + + return ret; +} + +static int m88e1510_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e1510_get_temp(phydev, temp); + break; + case hwmon_temp_crit: + err = m88e1510_get_temp_critical(phydev, temp); + break; + case hwmon_temp_max_alarm: + err = m88e1510_get_temp_alarm(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static int m88e1510_hwmon_write(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_crit: + err = m88e1510_set_temp_critical(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + return err; +} + +static umode_t m88e1510_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_crit: + return 0644; + default: + return 0; + } +} + +static u32 m88e1510_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, + 0 +}; + +static const struct hwmon_channel_info m88e1510_hwmon_temp = { + .type = hwmon_temp, + .config = m88e1510_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e1510_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e1510_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = { + .is_visible = m88e1510_hwmon_is_visible, + .read = m88e1510_hwmon_read, + .write = m88e1510_hwmon_write, +}; + +static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { + .ops = &m88e1510_hwmon_hwmon_ops, + .info = m88e1510_hwmon_info, +}; + +static int marvell_hwmon_name(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + const char *devname = dev_name(dev); + size_t len = strlen(devname); + int i, j; + + priv->hwmon_name = devm_kzalloc(dev, len, GFP_KERNEL); + if (!priv->hwmon_name) + return -ENOMEM; + + for (i = j = 0; i < len && devname[i]; i++) { + if (isalnum(devname[i])) + priv->hwmon_name[j++] = devname[i]; + } + + return 0; +} + +static int marvell_hwmon_probe(struct phy_device *phydev, + const struct hwmon_chip_info *chip) +{ + struct marvell_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int err; + + err = marvell_hwmon_name(phydev); + if (err) + return err; + + priv->hwmon_dev 
= devm_hwmon_device_register_with_info( + dev, priv->hwmon_name, phydev, chip, NULL); + + return PTR_ERR_OR_ZERO(priv->hwmon_dev); +} + +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info); +} + +static int m88e1510_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); +} +#else +static int m88e1121_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} + +static int m88e1510_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + static int marvell_probe(struct phy_device *phydev) { struct marvell_priv *priv; @@ -1481,14 +1861,47 @@ static int marvell_probe(struct phy_device *phydev) return 0; } +static int m88e1121_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1121_hwmon_probe(phydev); +} + +static int m88e1510_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e1510_hwmon_probe(phydev); +} + +static void marvell_remove(struct phy_device *phydev) +{ +#ifdef CONFIG_HWMON + + struct marvell_priv *priv = phydev->priv; + + if (priv && priv->hwmon_dev) + hwmon_device_unregister(priv->hwmon_dev); +#endif +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, - .probe = marvell_probe, .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &marvell_config_aneg, .read_status = &genphy_read_status, @@ -1560,7 +1973,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = &m88e1121_probe, + .remove = &marvell_remove, .config_init = &m88e1121_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, @@ -1672,7 +2086,8 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1510", .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = &m88e1510_probe, + .remove = &marvell_remove, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -1693,7 +2108,28 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1540", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = marvell_probe, + .probe = m88e1510_probe, + .remove = &marvell_remove, + .config_init = &marvell_config_init, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + }, + { + .phy_id = MARVELL_PHY_ID_88E1545, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1545", + .probe = m88e1510_probe, + .remove = &marvell_remove, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -1726,6 +2162,25 @@ static struct phy_driver marvell_drivers[] = { .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, }, + { + .phy_id = 
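/*
 * The probe path above uses the newer hwmon_device_register_with_info()
 * family, where visibility and access come from operation callbacks
 * plus static channel tables rather than hand-rolled sysfs attributes.
 * Smallest useful shape (all demo_* names are illustrative):
 */
#include <linux/hwmon.h>

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;	/* millidegrees C */
		return 0;
	}
	return -EOPNOTSUPP;
}

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return (type == hwmon_temp && attr == hwmon_temp_input) ? 0444 : 0;
}

static const u32 demo_temp_cfg[] = { HWMON_T_INPUT, 0 };
static const struct hwmon_channel_info demo_temp = {
	.type = hwmon_temp, .config = demo_temp_cfg,
};
static const struct hwmon_channel_info *demo_info[] = { &demo_temp, NULL };
static const struct hwmon_ops demo_ops = {
	.is_visible = demo_is_visible, .read = demo_read,
};
static const struct hwmon_chip_info demo_chip = {
	.ops = &demo_ops, .info = demo_info,
};
/* in probe: devm_hwmon_device_register_with_info(dev, "demo", drvdata,
 *					          &demo_chip, NULL); */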
MARVELL_PHY_ID_88E6390, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .probe = m88e1510_probe, + .config_init = &marvell_config_init, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + }, }; module_phy_driver(marvell_drivers); @@ -1743,7 +2198,9 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 27ab63064f95..7faa79b254ef 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -32,8 +32,7 @@ struct mdio_gpio_info { struct mdiobb_ctrl ctrl; - int mdc, mdio, mdo; - int mdc_active_low, mdio_active_low, mdo_active_low; + struct gpio_desc *mdc, *mdio, *mdo; }; static void *mdio_gpio_of_get_data(struct platform_device *pdev) @@ -80,16 +79,14 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. */ - gpio_set_value_cansleep(bitbang->mdo, - 1 ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, 1); return; } if (dir) - gpio_direction_output(bitbang->mdio, - 1 ^ bitbang->mdio_active_low); + gpiod_direction_output(bitbang->mdio, 1); else - gpio_direction_input(bitbang->mdio); + gpiod_direction_input(bitbang->mdio); } static int mdio_get(struct mdiobb_ctrl *ctrl) @@ -97,8 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value_cansleep(bitbang->mdio) ^ - bitbang->mdio_active_low; + return gpiod_get_value(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -107,11 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpio_set_value_cansleep(bitbang->mdo, - what ^ bitbang->mdo_active_low); + gpiod_set_value(bitbang->mdo, what); else - gpio_set_value_cansleep(bitbang->mdio, - what ^ bitbang->mdio_active_low); + gpiod_set_value(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -119,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpiod_set_value(bitbang->mdc, what); } static struct mdiobb_ops mdio_gpio_ops = { @@ -137,6 +131,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; int i; + int mdc, mdio, mdo; + unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; + unsigned long mdio_flags = GPIOF_DIR_IN; + unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); if (!bitbang) @@ -144,12 +142,20 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, 
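/*
 * The mdio-gpio rework above trades integer GPIO numbers and manual
 * active-low XORs for gpio_desc handles: once GPIOF_ACTIVE_LOW is baked
 * into the request, gpiod_set_value() takes logical levels and the
 * polarity fixups drop out of the bit-banging fast path. The request
 * half of that conversion, as done in the hunk (pin and "demo-mdc" are
 * placeholders):
 */
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *claim_mdc(struct device *dev, unsigned int pin,
				   bool active_low)
{
	/* The physical init level flips with the polarity so the line
	 * starts logically low in both cases. */
	unsigned long flags = active_low ?
			(GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW) :
			GPIOF_OUT_INIT_LOW;

	if (devm_gpio_request_one(dev, pin, flags, "demo-mdc"))
		return NULL;

	return gpio_to_desc(pin);	/* drive it with gpiod_set_value() */
}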
bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->ctrl.reset = pdata->reset; - bitbang->mdc = pdata->mdc; - bitbang->mdc_active_low = pdata->mdc_active_low; - bitbang->mdio = pdata->mdio; - bitbang->mdio_active_low = pdata->mdio_active_low; - bitbang->mdo = pdata->mdo; - bitbang->mdo_active_low = pdata->mdo_active_low; + mdc = pdata->mdc; + bitbang->mdc = gpio_to_desc(mdc); + if (pdata->mdc_active_low) + mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW; + mdio = pdata->mdio; + bitbang->mdio = gpio_to_desc(mdio); + if (pdata->mdio_active_low) + mdio_flags |= GPIOF_ACTIVE_LOW; + mdo = pdata->mdo; + if (mdo) { + bitbang->mdo = gpio_to_desc(mdo); + if (pdata->mdo_active_low) + mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW; + } new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) @@ -174,20 +180,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request(dev, bitbang->mdc, "mdc")) + if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc")) goto out_free_bus; - if (devm_gpio_request(dev, bitbang->mdio, "mdio")) + if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio")) goto out_free_bus; - if (bitbang->mdo) { - if (devm_gpio_request(dev, bitbang->mdo, "mdo")) - goto out_free_bus; - gpio_direction_output(bitbang->mdo, 1); - gpio_direction_input(bitbang->mdio); - } - - gpio_direction_output(bitbang->mdc, 0); + if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo")) + goto out_free_bus; dev_set_drvdata(dev, new_bus); diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 92af182951be..f095051beb54 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -311,6 +311,30 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl, } #endif +static const struct of_device_id xgene_mdio_of_match[] = { + { + .compatible = "apm,xgene-mdio-rgmii", + .data = (void *)XGENE_MDIO_RGMII + }, + { + .compatible = "apm,xgene-mdio-xfi", + .data = (void *)XGENE_MDIO_XFI + }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_mdio_acpi_match[] = { + { "APMC0D65", XGENE_MDIO_RGMII }, + { "APMC0D66", XGENE_MDIO_XFI }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); +#endif + + static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -430,32 +454,6 @@ static int xgene_mdio_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id xgene_mdio_of_match[] = { - { - .compatible = "apm,xgene-mdio-rgmii", - .data = (void *)XGENE_MDIO_RGMII - }, - { - .compatible = "apm,xgene-mdio-xfi", - .data = (void *)XGENE_MDIO_XFI - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_mdio_acpi_match[] = { - { "APMC0D65", XGENE_MDIO_RGMII }, - { "APMC0D66", XGENE_MDIO_XFI }, - { } -}; - -MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); -#endif - static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h index 354241b53c1d..594a11d42401 100644 --- a/drivers/net/phy/mdio-xgene.h +++ b/drivers/net/phy/mdio-xgene.h @@ -132,10 +132,6 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) #define GET_BIT(field, src) \ xgene_enet_get_field_value(field ## _POS, 1, src) -static const struct of_device_id xgene_mdio_of_match[]; -#ifdef 
CONFIG_ACPI -static const struct acpi_device_id xgene_mdio_acpi_match[]; -#endif int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg); int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data); struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 43c8fd46504b..fc3aaaa36b1d 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -67,7 +67,7 @@ int mdio_device_register(struct mdio_device *mdiodev) { int err; - dev_info(&mdiodev->dev, "mdio_device_register\n"); + dev_dbg(&mdiodev->dev, "mdio_device_register\n"); err = mdiobus_register_device(mdiodev); if (err) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 3d3b1f4339ef..a411b43a69eb 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1297,7 +1297,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } -static struct rtnl_link_stats64* +static void ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { struct ppp *ppp = netdev_priv(dev); @@ -1317,8 +1317,6 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) stats64->rx_dropped = dev->stats.rx_dropped; stats64->tx_dropped = dev->stats.tx_dropped; stats64->rx_length_errors = dev->stats.rx_length_errors; - - return stats64; } static int ppp_dev_init(struct net_device *dev) diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 9841f3dc0682..08db4d687533 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -566,7 +566,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu) /* Netdevice get statistics request */ -static struct rtnl_link_stats64 * +static void sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_device_stats *devstats = &dev->stats; @@ -597,7 +597,6 @@ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->collisions += comp->sls_o_misses; } #endif - return stats; } /* Netdevice register callback */ diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index bdc58567d10e..a3711769544b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1798,7 +1798,7 @@ unwind: return err; } -static struct rtnl_link_stats64 * +static void team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct team *team = netdev_priv(dev); @@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_dropped = rx_dropped; stats->tx_dropped = tx_dropped; stats->rx_nohandler = rx_nohandler; - return stats; } static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2cd10b26b650..8a7d6b905362 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -218,6 +218,7 @@ struct tun_struct { struct list_head disabled; void *security; u32 flow_count; + u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; }; @@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile) while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) kfree_skb(skb); + skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } @@ -953,7 +955,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) tun->align = new_hr; } -static struct rtnl_link_stats64 * +static void tun_net_get_stats64(struct net_device *dev, 
struct rtnl_link_stats64 *stats) { u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; @@ -987,7 +989,6 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_dropped = rx_dropped; stats->rx_frame_errors = rx_frame_errors; stats->tx_dropped = tx_dropped; - return stats; } static const struct net_device_ops tun_netdev_ops = { @@ -1140,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, return skb; } +static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, + struct sk_buff *skb, int more) +{ + struct sk_buff_head *queue = &tfile->sk.sk_write_queue; + struct sk_buff_head process_queue; + u32 rx_batched = tun->rx_batched; + bool rcv = false; + + if (!rx_batched || (!more && skb_queue_empty(queue))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + return; + } + + spin_lock(&queue->lock); + if (!more || skb_queue_len(queue) == rx_batched) { + __skb_queue_head_init(&process_queue); + skb_queue_splice_tail_init(queue, &process_queue); + rcv = true; + } else { + __skb_queue_tail(queue, skb); + } + spin_unlock(&queue->lock); + + if (rcv) { + struct sk_buff *nskb; + + local_bh_disable(); + while ((nskb = __skb_dequeue(&process_queue))) + netif_receive_skb(nskb); + netif_receive_skb(skb); + local_bh_enable(); + } +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, - int noblock) + int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; @@ -1284,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rxhash = skb_get_hash(skb); #ifndef CONFIG_4KSTACKS - local_bh_disable(); - netif_receive_skb(skb); - local_bh_enable(); + tun_rx_batched(tun, tfile, skb, more); #else netif_rx_ni(skb); #endif @@ -1312,7 +1347,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) if (!tun) return -EBADFD; - result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK); + result = tun_get_user(tun, tfile, NULL, from, + file->f_flags & O_NONBLOCK, false); tun_put(tun); return result; @@ -1570,7 +1606,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return -EBADFD; ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, - m->msg_flags & MSG_DONTWAIT); + m->msg_flags & MSG_DONTWAIT, + m->msg_flags & MSG_MORE); tun_put(tun); return ret; } @@ -1771,6 +1808,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; + tun->rx_batched = 0; tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); if (!tun->pcpu_stats) { @@ -2439,6 +2477,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) #endif } +static int tun_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = netdev_priv(dev); + + ec->rx_max_coalesced_frames = tun->rx_batched; + + return 0; +} + +static int tun_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct tun_struct *tun = netdev_priv(dev); + + if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) + tun->rx_batched = NAPI_POLL_WEIGHT; + else + tun->rx_batched = ec->rx_max_coalesced_frames; + + return 0; +} + static const struct ethtool_ops tun_ethtool_ops = { .get_settings = tun_get_settings, .get_drvinfo = tun_get_drvinfo, @@ -2446,6 
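The tun hunks above tie the new receive batching to the standard ethtool coalescing knob: tun_set_coalesce() clamps rx_max_coalesced_frames to NAPI_POLL_WEIGHT and stores it in tun->rx_batched, and tun_rx_batched() only defers packets when the writer signals more data is coming (sendmsg() with MSG_MORE, as a vhost-net style producer can). From userspace the knob is the ordinary coalescing interface; a hedged sketch of setting it programmatically, equivalent to "ethtool -C tun0 rx-frames 32" (device name illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* sock is any AF_INET datagram socket; returns 0 on success. */
static int tun_enable_rx_batching(int sock)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
	struct ifreq ifr;

	ec.rx_max_coalesced_frames = 32;	/* batch up to 32 frames */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}

Setting rx-frames back to 0 restores the old behaviour of pushing each packet into the stack immediately.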
+2507,8 @@ static const struct ethtool_ops tun_ethtool_ops = { .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_coalesce = tun_get_coalesce, + .set_coalesce = tun_set_coalesce, }; static int tun_queue_resize(struct tun_struct *tun) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 86144f9a80ee..f5552aaaa77a 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * connected. This causes the link state to be incorrect. Work around this by * always setting the state to off, then on. */ -void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) +static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ad42295356dd..986243c932cc 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3590,7 +3590,7 @@ static bool delay_autosuspend(struct r8152 *tp) return false; } -static int rtl8152_rumtime_suspend(struct r8152 *tp) +static int rtl8152_runtime_suspend(struct r8152 *tp) { struct net_device *netdev = tp->netdev; int ret = 0; @@ -3672,7 +3672,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) mutex_lock(&tp->control); if (PMSG_IS_AUTO(message)) - ret = rtl8152_rumtime_suspend(tp); + ret = rtl8152_runtime_suspend(tp); else ret = rtl8152_system_suspend(tp); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 0520952aa096..8c39d6d690e5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -158,8 +158,8 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev) return atomic64_read(&priv->dropped); } -static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void veth_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; @@ -177,8 +177,6 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, tot->rx_packets = one.packets; } rcu_read_unlock(); - - return tot; } /* fake multicast ability */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 765c2d6358da..29982c7f6080 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -23,12 +23,12 @@ #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/average.h> -#include <net/busy_poll.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -338,7 +338,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_xmit(struct virtnet_info *vi, +static bool virtnet_xdp_xmit(struct virtnet_info *vi, struct receive_queue *rq, struct send_queue *sq, struct xdp_buff *xdp, @@ -390,10 +390,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi, put_page(page); } else /* small buffer */ kfree_skb(data); - return; // On error abort to avoid unnecessary kick + /* On error abort to avoid unnecessary kick */ + return false; } virtqueue_kick(sq->vq); + return true; } static u32 do_xdp_prog(struct virtnet_info *vi, @@ -429,11 +431,14 @@ static u32 do_xdp_prog(struct virtnet_info *vi, vi->xdp_queue_pairs + smp_processor_id(); 
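The virtio_net change beginning here makes virtnet_xdp_xmit() report failure so the caller can account dropped XDP_TX frames through the new xdp_exception tracepoint (hence the linux/bpf_trace.h include above). The general shape of that error-visibility pattern, in an illustrative driver context:

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/bpf_trace.h>

/* Return the XDP verdict, emitting the xdp_exception tracepoint for
 * actions the driver could not honour, so perf/bpftrace can see them.
 */
static u32 example_handle_xdp_act(struct net_device *dev,
				  struct bpf_prog *prog, u32 act,
				  bool xmit_ok)
{
	switch (act) {
	case XDP_TX:
		if (unlikely(!xmit_ok))
			trace_xdp_exception(dev, prog, act);
		return XDP_TX;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		/* fall through */
	case XDP_DROP:
	default:
		return XDP_DROP;
	}
}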
xdp.data = buf; - virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data); + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, + data))) + trace_xdp_exception(vi->dev, xdp_prog, act); return XDP_TX; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: + trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: return XDP_DROP; } @@ -1007,53 +1012,17 @@ static int virtnet_poll(struct napi_struct *napi, int budget) /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); - napi_complete_done(napi, received); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - __napi_schedule(napi); - } - } - - return received; -} - -#ifdef CONFIG_NET_RX_BUSY_POLL -/* must be called with local_bh_disable()d */ -static int virtnet_busy_poll(struct napi_struct *napi) -{ - struct receive_queue *rq = - container_of(napi, struct receive_queue, napi); - struct virtnet_info *vi = rq->vq->vdev->priv; - int r, received = 0, budget = 4; - - if (!(vi->status & VIRTIO_NET_S_LINK_UP)) - return LL_FLUSH_FAILED; - - if (!napi_schedule_prep(napi)) - return LL_FLUSH_BUSY; - - virtqueue_disable_cb(rq->vq); - -again: - received += virtnet_receive(rq, budget); - - r = virtqueue_enable_cb_prepare(rq->vq); - clear_bit(NAPI_STATE_SCHED, &napi->state); - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - if (received < budget) { - budget -= received; - goto again; - } else { - __napi_schedule(napi); + if (napi_complete_done(napi, received)) { + if (unlikely(virtqueue_poll(rq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(rq->vq); + __napi_schedule(napi); + } } } return received; } -#endif /* CONFIG_NET_RX_BUSY_POLL */ static int virtnet_open(struct net_device *dev) { @@ -1244,10 +1213,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) struct sockaddr *addr; struct scatterlist sg; - addr = kmalloc(sizeof(*addr), GFP_KERNEL); + addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; - memcpy(addr, p, sizeof(*addr)); ret = eth_prepare_mac_addr_change(dev, addr); if (ret) @@ -1281,8 +1249,8 @@ out: return ret; } -static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void virtnet_stats(struct net_device *dev, + struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; @@ -1315,8 +1283,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; - - return tot; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1814,9 +1780,6 @@ static const struct net_device_ops virtnet_netdev = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = virtnet_busy_poll, -#endif .ndo_xdp = virtnet_xdp, }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e34b1297c96a..25bc764ae7dc 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget) rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_all_intrs(rx_queue->adapter); } return rxd_done; @@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct 
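virtnet_poll() above, and vmxnet3, fsl_ucc_hdlc and hd64572 below, move from napi_complete() to napi_complete_done(). Passing the amount of work done feeds busy polling and interrupt moderation, and in this tree the function also returns whether NAPI was actually descheduled, so a driver only re-enables its interrupt when it still owns the poll context. The canonical ending of a poll loop then looks like this (the rx and irq helpers are stand-ins for driver-specific code):

#include <linux/netdevice.h>

static int example_rx(struct napi_struct *napi, int budget)
{
	/* driver-specific receive processing would go here */
	return 0;
}

static void example_enable_irq(struct napi_struct *napi)
{
	/* driver-specific interrupt unmask would go here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_rx(napi, budget);

	if (done < budget && napi_complete_done(napi, done))
		example_enable_irq(napi);

	return done;
}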
*napi, int budget) rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); if (rxd_done < budget) { - napi_complete(napi); + napi_complete_done(napi, rxd_done); vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); } return rxd_done; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index aabc6ef366b4..f88ffafebfbf 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -113,7 +113,7 @@ vmxnet3_global_stats[] = { }; -struct rtnl_link_stats64 * +void vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *netdev, stats->rx_dropped += drvRxStats->drop_total; stats->multicast += devRxStats->mcastPktsRxOK; } - - return stats; } static int diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59e077be8829..ba1c9f93592b 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -465,8 +465,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, void vmxnet3_set_ethtool_ops(struct net_device *netdev); -struct rtnl_link_stats64 * -vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +void vmxnet3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); extern char vmxnet3_driver_name[]; #endif diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 454f907d419a..264fc1585b3c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -77,8 +77,8 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) kfree_skb(skb); } -static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) +static void vrf_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { int i; @@ -102,7 +102,6 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, stats->rx_bytes += rbytes; stats->rx_packets += rpkts; } - return stats; } /* Local traffic destined to local address. 
Reinsert the packet to rx diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 50b62db213b0..2374a75dcb55 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -75,6 +75,7 @@ struct vxlan_fdb { struct list_head remotes; u8 eth_addr[ETH_ALEN]; u16 state; /* see ndm_state */ + __be32 vni; u8 flags; /* see ndm_flags */ }; @@ -302,6 +303,10 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (rdst->remote_vni != vxlan->default_dst.remote_vni && nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) goto nla_put_failure; + if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && + nla_put_u32(skb, NDA_SRC_VNI, + be32_to_cpu(fdb->vni))) + goto nla_put_failure; if (rdst->remote_ifindex && nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) goto nla_put_failure; @@ -400,34 +405,51 @@ static u32 eth_hash(const unsigned char *addr) return hash_64(value, FDB_HASH_BITS); } +static u32 eth_vni_hash(const unsigned char *addr, __be32 vni) +{ + /* use 1 byte of OUI and 3 bytes of NIC */ + u32 key = get_unaligned((u32 *)(addr + 2)); + + return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1); +} + /* Hash chain to use given mac address */ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { - return &vxlan->fdb_head[eth_hash(mac)]; + if (vxlan->flags & VXLAN_F_COLLECT_METADATA) + return &vxlan->fdb_head[eth_vni_hash(mac, vni)]; + else + return &vxlan->fdb_head[eth_hash(mac)]; } /* Look up Ethernet address in forwarding table */ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { - struct hlist_head *head = vxlan_fdb_head(vxlan, mac); + struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); struct vxlan_fdb *f; hlist_for_each_entry_rcu(f, head, hlist) { - if (ether_addr_equal(mac, f->eth_addr)) - return f; + if (ether_addr_equal(mac, f->eth_addr)) { + if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { + if (vni == f->vni) + return f; + } else { + return f; + } + } } return NULL; } static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, - const u8 *mac) + const u8 *mac, __be32 vni) { struct vxlan_fdb *f; - f = __vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac, vni); if (f) f->used = jiffies; @@ -605,15 +627,15 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, - __be16 port, __be32 vni, __u32 ifindex, - __u8 ndm_flags) + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u8 ndm_flags) { struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; int notify = 0; int rc; - f = __vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac, src_vni); if (f) { if (flags & NLM_F_EXCL) { netdev_dbg(vxlan->dev, @@ -670,6 +692,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, f->state = state; f->flags = ndm_flags; f->updated = f->used = jiffies; + f->vni = src_vni; INIT_LIST_HEAD(&f->remotes); memcpy(f->eth_addr, mac, ETH_ALEN); @@ -681,7 +704,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, ++vxlan->addrcnt; hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac)); + vxlan_fdb_head(vxlan, mac, src_vni)); } if (notify) { @@ -718,8 +741,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) } static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, - union vxlan_addr *ip, __be16 *port, __be32 *vni, - 
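The vxlan hunks above make the forwarding database VNI-aware for collect-metadata (external) devices: each entry records the VNI it was learned under, userspace can address it via the new NDA_SRC_VNI attribute, and eth_vni_hash() mixes the VNI into the bucket choice so the same MAC can coexist under different VNIs. A sketch of that two-component keying, with illustrative constants (the driver's vxlan_salt is presumably seeded at init elsewhere in the patch, not shown here):

#include <linux/jhash.h>
#include <linux/types.h>
#include <asm/unaligned.h>

#define EXAMPLE_HASH_BITS	8
#define EXAMPLE_HASH_SIZE	(1 << EXAMPLE_HASH_BITS)

/* Key on (MAC, VNI): 1 byte of OUI plus the 3-byte NIC portion,
 * mixed with the VNI, as eth_vni_hash() does above.
 */
static u32 example_mac_vni_hash(const u8 *mac, __be32 vni, u32 salt)
{
	u32 key = get_unaligned((u32 *)(mac + 2));

	return jhash_2words(key, (__force u32)vni, salt) &
	       (EXAMPLE_HASH_SIZE - 1);
}

Non-metadata devices keep the plain MAC hash, which is why vxlan_fdb_head() above branches on VXLAN_F_COLLECT_METADATA.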
u32 *ifindex) + union vxlan_addr *ip, __be16 *port, __be32 *src_vni, + __be32 *vni, u32 *ifindex) { struct net *net = dev_net(vxlan->dev); int err; @@ -757,6 +780,14 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, *vni = vxlan->default_dst.remote_vni; } + if (tb[NDA_SRC_VNI]) { + if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) + return -EINVAL; + *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); + } else { + *src_vni = vxlan->default_dst.remote_vni; + } + if (tb[NDA_IFINDEX]) { struct net_device *tdev; @@ -782,7 +813,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* struct net *net = dev_net(vxlan->dev); */ union vxlan_addr ip; __be16 port; - __be32 vni; + __be32 src_vni, vni; u32 ifindex; int err; @@ -795,7 +826,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (tb[NDA_DST] == NULL) return -EINVAL; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); if (err) return err; @@ -804,36 +835,24 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], spin_lock_bh(&vxlan->hash_lock); err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, - port, vni, ifindex, ndm->ndm_flags); + port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); return err; } -/* Delete entry (via netlink) */ -static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) +static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, + const unsigned char *addr, union vxlan_addr ip, + __be16 port, __be32 src_vni, u32 vni, u32 ifindex, + u16 vid) { - struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; struct vxlan_rdst *rd = NULL; - union vxlan_addr ip; - __be16 port; - __be32 vni; - u32 ifindex; - int err; + int err = -ENOENT; - err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); - if (err) - return err; - - err = -ENOENT; - - spin_lock_bh(&vxlan->hash_lock); - f = vxlan_find_mac(vxlan, addr); + f = vxlan_find_mac(vxlan, addr, src_vni); if (!f) - goto out; + return err; if (!vxlan_addr_any(&ip)) { rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); @@ -841,8 +860,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], goto out; } - err = 0; - /* remove a destination if it's not the only one on the list, * otherwise destroy the fdb entry */ @@ -856,6 +873,28 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], vxlan_fdb_destroy(vxlan, f); out: + return 0; +} + +/* Delete entry (via netlink) */ +static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + union vxlan_addr ip; + __be32 src_vni, vni; + __be16 port; + u32 ifindex; + int err; + + err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex); + if (err) + return err; + + spin_lock_bh(&vxlan->hash_lock); + err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, + vid); spin_unlock_bh(&vxlan->hash_lock); return err; @@ -901,12 +940,13 @@ out: * Return true if packet is bogus and should be dropped. 
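vxlan_fdb_delete() is also split here into the kernel's usual locked-wrapper/__helper pair: the new __vxlan_fdb_delete() assumes hash_lock is already held, so other code paths can batch several deletions under a single lock acquisition. The convention in miniature (illustrative types):

#include <linux/spinlock.h>

struct example_table {
	spinlock_t lock;
	int nentries;
};

/* Double-underscore helper: caller must hold t->lock. */
static int __example_delete(struct example_table *t, int key)
{
	t->nentries--;
	return 0;
}

/* Public entry point takes and releases the lock itself. */
static int example_delete(struct example_table *t, int key)
{
	int err;

	spin_lock_bh(&t->lock);
	err = __example_delete(t, key);
	spin_unlock_bh(&t->lock);
	return err;
}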
*/ static bool vxlan_snoop(struct net_device *dev, - union vxlan_addr *src_ip, const u8 *src_mac) + union vxlan_addr *src_ip, const u8 *src_mac, + __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; - f = vxlan_find_mac(vxlan, src_mac); + f = vxlan_find_mac(vxlan, src_mac, vni); if (likely(f)) { struct vxlan_rdst *rdst = first_remote_rcu(f); @@ -935,6 +975,7 @@ static bool vxlan_snoop(struct net_device *dev, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, + vni, vxlan->default_dst.remote_vni, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); @@ -1202,7 +1243,7 @@ static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed, static bool vxlan_set_mac(struct vxlan_dev *vxlan, struct vxlan_sock *vs, - struct sk_buff *skb) + struct sk_buff *skb, __be32 vni) { union vxlan_addr saddr; @@ -1226,7 +1267,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, } if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni)) return false; return true; @@ -1268,6 +1309,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) __be16 protocol = htons(ETH_P_TEB); bool raw_proto = false; void *oiph; + __be32 vni = 0; /* Need UDP and VXLAN header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1289,7 +1331,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vs) goto drop; - vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); + vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + + if ((vs->flags & VXLAN_F_COLLECT_METADATA) && !vni) + goto drop; + + vxlan = vxlan_vs_find_vni(vs, vni); if (!vxlan) goto drop; @@ -1307,7 +1354,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; if (vxlan_collect_metadata(vs)) { - __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); struct metadata_dst *tun_dst; tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, @@ -1345,7 +1391,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) } if (!raw_proto) { - if (!vxlan_set_mac(vxlan, vs, skb)) + if (!vxlan_set_mac(vxlan, vs, skb, vni)) goto drop; } else { skb_reset_mac_header(skb); @@ -1377,7 +1423,7 @@ drop: return 0; } -static int arp_reduce(struct net_device *dev, struct sk_buff *skb) +static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct arphdr *parp; @@ -1424,7 +1470,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) goto out; } - f = vxlan_find_mac(vxlan, n->ha); + f = vxlan_find_mac(vxlan, n->ha, vni); if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); @@ -1548,7 +1594,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, return reply; } -static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) +static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); struct nd_msg *msg; @@ -1585,7 +1631,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) goto out; } - f = vxlan_find_mac(vxlan, n->ha); + f = vxlan_find_mac(vxlan, n->ha, vni); if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); @@ -1906,7 +1952,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev 
*src_vxlan, - struct vxlan_dev *dst_vxlan) + struct vxlan_dev *dst_vxlan, __be32 vni) { struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; @@ -1932,7 +1978,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, } if (dst_vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); + vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -1951,7 +1997,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan, union vxlan_addr *daddr, - __be32 dst_port, __be32 vni, struct dst_entry *dst, + __be16 dst_port, __be32 vni, struct dst_entry *dst, u32 rt_flags) { #if IS_ENABLED(CONFIG_IPV6) @@ -1976,7 +2022,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, return -ENOENT; } - vxlan_encap_bypass(skb, vxlan, dst_vxlan); + vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni); return 1; } @@ -1984,7 +2030,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, } static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, - struct vxlan_rdst *rdst, bool did_rsc) + __be32 default_vni, struct vxlan_rdst *rdst, + bool did_rsc) { struct dst_cache *dst_cache; struct ip_tunnel_info *info; @@ -2011,14 +2058,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (vxlan_addr_any(dst)) { if (did_rsc) { /* short-circuited back to local bridge */ - vxlan_encap_bypass(skb, vxlan, vxlan); + vxlan_encap_bypass(skb, vxlan, vxlan, default_vni); return; } goto drop; } dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; - vni = rdst->remote_vni; + vni = (rdst->remote_vni) ? 
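Tucked into the bypass refactor above is a small endianness fix: encap_bypass_if_local() took the UDP destination port as __be32, but a port is 16 bits on the wire, so the parameter becomes __be16. sparse ("make C=1") flags exactly this class of mismatch through the __bitwise annotations on the fixed-endian types; a minimal illustration:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hdr {
	__be16 dst_port;	/* UDP port: 16 bits on the wire */
	__be32 vni;		/* VXLAN VNI sits in a 32-bit field */
};

static void example_fill(struct example_hdr *h, u16 port, u32 vni)
{
	h->dst_port = cpu_to_be16(port);	/* u16 -> __be16 */
	h->vni = cpu_to_be32(vni);		/* u32 -> __be32 */
}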
: default_vni; src = &vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; md->gbp = skb->mark; @@ -2173,23 +2220,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + __be32 vni = 0; info = skb_tunnel_info(skb); skb_reset_mac_header(skb); if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { - if (info && info->mode & IP_TUNNEL_INFO_TX) - vxlan_xmit_one(skb, dev, NULL, false); - else - kfree_skb(skb); - return NETDEV_TX_OK; + if (info && info->mode & IP_TUNNEL_INFO_BRIDGE && + info->mode & IP_TUNNEL_INFO_TX) { + vni = tunnel_id_to_key32(info->key.tun_id); + } else { + if (info && info->mode & IP_TUNNEL_INFO_TX) + vxlan_xmit_one(skb, dev, vni, NULL, false); + else + kfree_skb(skb); + return NETDEV_TX_OK; + } } if (vxlan->flags & VXLAN_F_PROXY) { eth = eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_ARP) - return arp_reduce(dev, skb); + return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(eth->h_proto) == ETH_P_IPV6 && pskb_may_pull(skb, sizeof(struct ipv6hdr) @@ -2200,13 +2253,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code == 0 && msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) - return neigh_reduce(dev, skb); + return neigh_reduce(dev, skb, vni); } #endif } eth = eth_hdr(skb); - f = vxlan_find_mac(vxlan, eth->h_dest); + f = vxlan_find_mac(vxlan, eth->h_dest, vni); did_rsc = false; if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && @@ -2214,11 +2267,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) ntohs(eth->h_proto) == ETH_P_IPV6)) { did_rsc = route_shortcircuit(dev, skb); if (did_rsc) - f = vxlan_find_mac(vxlan, eth->h_dest); + f = vxlan_find_mac(vxlan, eth->h_dest, vni); } if (f == NULL) { - f = vxlan_find_mac(vxlan, all_zeros_mac); + f = vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f == NULL) { if ((vxlan->flags & VXLAN_F_L2MISS) && !is_multicast_ether_addr(eth->h_dest)) @@ -2239,11 +2292,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) } skb1 = skb_clone(skb, GFP_ATOMIC); if (skb1) - vxlan_xmit_one(skb1, dev, rdst, did_rsc); + vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); } if (fdst) - vxlan_xmit_one(skb, dev, fdst, did_rsc); + vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); else kfree_skb(skb); return NETDEV_TX_OK; @@ -2307,12 +2360,12 @@ static int vxlan_init(struct net_device *dev) return 0; } -static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) +static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) { struct vxlan_fdb *f; spin_lock_bh(&vxlan->hash_lock); - f = __vxlan_find_mac(vxlan, all_zeros_mac); + f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) vxlan_fdb_destroy(vxlan, f); spin_unlock_bh(&vxlan->hash_lock); @@ -2322,7 +2375,7 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - vxlan_fdb_delete_default(vxlan); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); free_percpu(dev->tstats); } @@ -2923,6 +2976,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, NTF_SELF); if (err) @@ -2931,7 +2985,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if 
(err) { - vxlan_fdb_delete_default(vxlan); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); return err; } diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index e38ce4da3efb..d869533d2e79 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget) howmany += hdlc_rx_done(priv, budget - howmany); if (howmany < budget) { - napi_complete(napi); + napi_complete_done(napi, howmany); qe_setbits32(priv->uccf->p_uccm, (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); } diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 7ef49dab6855..cff0cfadd650 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget) received = sca_rx_done(port, budget); if (received < budget) { - napi_complete(napi); + napi_complete_done(napi, received); enable_intr(port); } diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 70ecd82d674d..098c814e22c8 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev) skb_tail_pointer(newskb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(priv->pdev, + priv->rx_buffers[entry].mapping)) { + priv->rx_buffers[entry].skb = NULL; + dev_kfree_skb(newskb); + skb = NULL; + /* TODO: update rx dropped stats */ + } } else { skb = NULL; /* TODO: update rx dropped stats */ @@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev) skb_tail_pointer(rx_info->skb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) { + dev_kfree_skb(rx_info->skb); + rx_info->skb = NULL; + break; + } + desc->buffer1 = cpu_to_le32(rx_info->mapping); desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); } @@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int } /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ -static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, +static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, u16 plcp_signal, size_t hdrlen) { @@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, mapping)) + return -ENOMEM; spin_lock_irqsave(&priv->lock, flags); @@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, /* Trigger transmit poll */ ADM8211_CSR_WRITE(TDR, 0); + + return 0; } /* Put adm8211_tx_hdr on skb and transmit */ @@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev, txhdr->retry_limit = info->control.rates[0].count; - adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); + if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) { + /* Drop packet */ + ieee80211_free_txskb(dev, skb); + } } static int adm8211_alloc_rings(struct ieee80211_hw *dev) @@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev, priv->rx_ring_size = rx_ring_size; priv->tx_ring_size = tx_ring_size; - if (adm8211_alloc_rings(dev)) { + err = adm8211_alloc_rings(dev); + if (err) { printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", pci_name(pdev)); goto err_iounmap; diff --git 
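The adm8211 hunks above add DMA mapping error checks that were previously missing: pci_map_single() can fail (especially behind an IOMMU or with swiotlb), and consuming an unchecked handle is a real bug, so adm8211_tx_raw() now returns an error and the caller frees the skb. The pattern, in an illustrative context:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for host-to-device DMA, failing cleanly if the mapping
 * could not be created; the caller is expected to free the skb.
 */
static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, skb->data, skb->len,
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *mapping))
		return -ENOMEM;

	return 0;
}

The same check guards the RX ring refills above, where a failed mapping drops the freshly allocated skb instead of handing the device a bogus address.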
a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 0b4d79659884..da466ab2d823 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -958,7 +958,7 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * coherent DMA are unsupported */ dest_ring->base_addr_owner_space_unaligned = - dma_alloc_coherent(ar->dev, + dma_zalloc_coherent(ar->dev, (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), &base_addr, GFP_KERNEL); @@ -969,13 +969,6 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, dest_ring->base_addr_ce_space_unaligned = base_addr; - /* - * Correctly initialize memory to 0 to prevent garbage - * data crashing system when download firmware - */ - memset(dest_ring->base_addr_owner_space_unaligned, 0, - nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN); - dest_ring->base_addr_owner_space = PTR_ALIGN( dest_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); @@ -1130,3 +1123,42 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ce_state->src_ring = NULL; ce_state->dest_ring = NULL; } + +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_crash_data ce; + u32 addr, id; + + lockdep_assert_held(&ar->data_lock); + + ath10k_err(ar, "Copy Engine register dump:\n"); + + spin_lock_bh(&ar_pci->ce_lock); + for (id = 0; id < CE_COUNT; id++) { + addr = ath10k_ce_base_address(ar, id); + ce.base_addr = cpu_to_le32(addr); + + ce.src_wr_idx = + cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr)); + ce.src_r_idx = + cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr)); + ce.dst_wr_idx = + cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr)); + ce.dst_r_idx = + cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr)); + + if (crash_data) + crash_data->ce_crash_data[id] = ce; + + ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id, + le32_to_cpu(ce.base_addr), + le32_to_cpu(ce.src_wr_idx), + le32_to_cpu(ce.src_r_idx), + le32_to_cpu(ce.dst_wr_idx), + le32_to_cpu(ce.dst_r_idx)); + } + + spin_unlock_bh(&ar_pci->ce_lock); +} diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index dfc098606bee..e76a98242b98 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -20,8 +20,6 @@ #include "hif.h" -/* Maximum number of Copy Engine's supported */ -#define CE_COUNT_MAX 12 #define CE_HTT_H2T_MSG_SRC_NENTRIES 8192 /* Descriptor rings must be aligned to this boundary */ @@ -228,6 +226,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); +void ath10k_ce_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? 
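The ath10k copy-engine hunk above is a standard simplification: dma_alloc_coherent() followed by a manual memset() collapses into dma_zalloc_coherent(), which hands back already-zeroed coherent memory. Sketch (dev and len are illustrative):

#include <linux/dma-mapping.h>

/* One call replaces alloc + memset. */
static void *example_alloc_ring(struct device *dev, size_t len,
				dma_addr_t *paddr)
{
	return dma_zalloc_coherent(dev, len, paddr, GFP_KERNEL);
}

The comment the patch deletes ("correctly initialize memory to 0...") records why the zeroing matters: the firmware download path crashes on garbage descriptors.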
*/ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 749e381edd38..c2afcca6fd60 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,8 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <linux/dmi.h> +#include <linux/ctype.h> #include <asm/byteorder.h> #include "core.h" @@ -694,8 +696,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) "boot get otp board id result 0x%08x board_id %d chip_id %d\n", result, board_id, chip_id); - if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0) + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || + (board_id == 0)) { + ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); return -EOPNOTSUPP; + } ar->id.bmi_ids_valid = true; ar->id.bmi_board_id = board_id; @@ -704,6 +709,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) return 0; } +static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath10k *ar = data; + const char *bdf_ext; + const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; + u8 bdf_enabled; + int i; + + if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); + if (!bdf_enabled) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + bdf_ext = (char *)hdr + hdr->length; + + if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + for (i = 0; i < strlen(bdf_ext); i++) { + if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic suffix */ + if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), + sizeof(ar->id.bdf_ext)) < 0) { + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + bdf_ext); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); +} + +static int ath10k_core_check_smbios(struct ath10k *ar) +{ + ar->id.bdf_ext[0] = '\0'; + dmi_walk(ath10k_core_check_bdfext, ar); + + if (ar->id.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1050,6 +1121,9 @@ err: static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { + /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; + if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", @@ -1059,12 +1133,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, goto out; } + if (ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + scnprintf(name, name_len, - "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x", + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), ar->id.vendor, ar->id.device, - ar->id.subsystem_vendor, 
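ath10k_core_check_smbios() above uses the dmi_walk() idiom: the DMI layer iterates every raw SMBIOS structure and passes each header to a callback, which filters by type and length and then reads the strings-set that follows the formatted area. Reduced to its skeleton (the type value mirrors ATH10K_SMBIOS_BDF_EXT_TYPE, everything else is illustrative):

#include <linux/dmi.h>

static void example_dmi_cb(const struct dmi_header *hdr, void *data)
{
	bool *found = data;

	if (hdr->type == 0xf8)	/* OEM-specific record */
		*found = true;
}

static bool example_probe_smbios(void)
{
	bool found = false;

	/* returns an error when no DMI tables exist; ignored here,
	 * as the ath10k caller also ignores it
	 */
	dmi_walk(example_dmi_cb, &found);
	return found;
}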
ar->id.subsystem_device); - + ar->id.subsystem_vendor, ar->id.subsystem_device, variant); out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -1510,6 +1587,7 @@ static int ath10k_init_hw_params(struct ath10k *ar) static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); + int ret; set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); @@ -1561,6 +1639,11 @@ static void ath10k_core_restart(struct work_struct *work) } mutex_unlock(&ar->conf_mutex); + + ret = ath10k_debug_fw_devcoredump(ar); + if (ret) + ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", + ret); } static void ath10k_core_set_coverage_class_work(struct work_struct *work) @@ -2119,6 +2202,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); + ret = ath10k_core_fetch_board_file(ar); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 09ff8b8a6441..88d14be7fcce 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -46,7 +46,7 @@ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) -#define ATH10K_NUM_CHANS 39 +#define ATH10K_NUM_CHANS 40 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 @@ -69,6 +69,23 @@ #define ATH10K_NAPI_BUDGET 64 #define ATH10K_NAPI_QUOTA_LIMIT 60 +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* Offset pointing to Board Data File Name Extension */ +#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8 + +/* Board Data File Name Extension string length. 
+ * String format: BDF_<Customer ID>_<Extension>\0 + */ +#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20 + +/* The magic used by QCA spec */ +#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" + struct ath10k; enum ath10k_bus { @@ -314,6 +331,7 @@ struct ath10k_peer { struct ieee80211_vif *vif; struct ieee80211_sta *sta; + bool removed; int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); @@ -419,6 +437,21 @@ struct ath10k_vif_iter { struct ath10k_vif *arvif; }; +/* Copy Engine register dump, protected by ce-lock */ +struct ath10k_ce_crash_data { + __le32 base_addr; + __le32 src_wr_idx; + __le32 src_r_idx; + __le32 dst_wr_idx; + __le32 dst_r_idx; +}; + +struct ath10k_ce_crash_hdr { + __le32 ce_count; + __le32 reserved[3]; /* for future use */ + struct ath10k_ce_crash_data entries[]; +}; + /* used for crash-dump storage, protected by data-lock */ struct ath10k_fw_crash_data { bool crashed_since_read; @@ -426,6 +459,7 @@ struct ath10k_fw_crash_data { uuid_le uuid; struct timespec timestamp; __le32 registers[REG_DUMP_COUNT_QCA988X]; + struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; }; struct ath10k_debug { @@ -781,6 +815,8 @@ struct ath10k { bool bmi_ids_valid; u8 bmi_board_id; u8 bmi_chip_id; + + char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; } id; int fw_api; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 82a4c67f3672..d5ff0f4ef5ce 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -21,6 +21,7 @@ #include <linux/utsname.h> #include <linux/crc32.h> #include <linux/firmware.h> +#include <linux/devcoredump.h> #include "core.h" #include "debug.h" @@ -40,6 +41,7 @@ */ enum ath10k_fw_crash_dump_type { ATH10K_FW_CRASH_DUMP_REGISTERS = 0, + ATH10K_FW_CRASH_DUMP_CE_DATA = 1, ATH10K_FW_CRASH_DUMP_MAX, }; @@ -399,6 +401,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * prevent firmware from DoS-ing the host. 
*/ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } @@ -409,10 +412,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } + if (!list_empty(&stats.peers)) + list_splice_tail_init(&stats.peers_extd, + &ar->debug.fw_stats.peers_extd); + list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); - list_splice_tail_init(&stats.peers_extd, - &ar->debug.fw_stats.peers_extd); } complete(&ar->debug.fw_stats_complete); @@ -721,9 +726,11 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data); -static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) +static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, + bool mark_read) { struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; int hdr_len = sizeof(*dump_data); @@ -732,6 +739,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) len = hdr_len; len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); sofar += hdr_len; @@ -790,19 +799,66 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) sizeof(crash_data->registers)); sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); - ar->debug.fw_crash_data->crashed_since_read = false; + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); + dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0])); + ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); + ce_hdr->ce_count = cpu_to_le32(CE_COUNT); + memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); + memcpy(ce_hdr->entries, crash_data->ce_crash_data, + CE_COUNT * sizeof(ce_hdr->entries[0])); + sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + + ar->debug.fw_crash_data->crashed_since_read = !mark_read; spin_unlock_bh(&ar->data_lock); return dump_data; } +int ath10k_debug_fw_devcoredump(struct ath10k *ar) +{ + struct ath10k_dump_file_data *dump; + void *dump_ptr; + u32 dump_len; + + /* To keep the dump file available also for debugfs don't mark the + * file read, only debugfs should do that. + */ + dump = ath10k_build_dump_file(ar, false); + if (!dump) { + ath10k_warn(ar, "no crash dump data found for devcoredump"); + return -ENODATA; + } + + /* Make a copy of the dump file for dev_coredumpv() as during the + * transition period we need to own the original file. Once + * fw_crash_dump debugfs file is removed no need to have a copy + * anymore. 
+ */ + dump_len = le32_to_cpu(dump->len); + dump_ptr = vzalloc(dump_len); + + if (!dump_ptr) + return -ENOMEM; + + memcpy(dump_ptr, dump, dump_len); + + dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL); + + return 0; +} + static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; struct ath10k_dump_file_data *dump; - dump = ath10k_build_dump_file(ar); + ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead."); + + dump = ath10k_build_dump_file(ar, true); if (!dump) return -ENODATA; diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 335512b11ca2..2368f47314ae 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -84,6 +84,9 @@ struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); + +int ath10k_debug_fw_devcoredump(struct ath10k *ar); + #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, @@ -166,6 +169,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) return 0; } +static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar) +{ + return 0; +} + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #define ath10k_debug_get_et_strings NULL diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 175aae38c375..9f6a915f91bf 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -474,33 +474,16 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc) } } -static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc) -{ - struct ath10k_htc_svc_tx_credits *entry; - - entry = &htc->service_tx_alloc[0]; - - /* - * for PCIE allocate all credists/HTC buffers to WMI. - * no buffers are used/required for data. data always - * remains on host. - */ - entry++; - entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; - entry->credit_allocation = htc->total_transmit_credits; -} - static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, u16 service_id) { u8 allocation = 0; - int i; - for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { - if (htc->service_tx_alloc[i].service_id == service_id) - allocation = - htc->service_tx_alloc[i].credit_allocation; - } + /* The WMI control service is the only service with flow control. + * Let it have all transmit credits. 
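ath10k_debug_fw_devcoredump() above is the new preferred crash-dump path: rather than a driver-private debugfs file (which the next hunk marks deprecated), the dump is handed to the devcoredump framework and appears under /sys/class/devcoredump. A minimal sketch of the handoff:

#include <linux/devcoredump.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* dev_coredumpv() takes ownership of the vmalloc'ed buffer and
 * vfree()s it once the dump is read or times out, which is why the
 * ath10k code above copies its dump first. Illustrative context.
 */
static int example_report_crash(struct device *dev, const void *blob,
				size_t len)
{
	void *copy = vzalloc(len);

	if (!copy)
		return -ENOMEM;

	memcpy(copy, blob, len);
	dev_coredumpv(dev, copy, len, GFP_KERNEL);
	return 0;
}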
+ */ + if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL) + allocation = htc->total_transmit_credits; return allocation; } @@ -574,8 +557,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) return -ECOMM; } - ath10k_htc_setup_target_buffer_assignments(htc); - /* setup our pseudo HTC control endpoint connection */ memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); @@ -726,12 +707,6 @@ setup: ep->max_tx_queue_depth = conn_req->max_send_queue_depth; ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size); ep->tx_credits = tx_alloc; - ep->tx_credit_size = htc->target_credit_size; - ep->tx_credits_per_max_message = ep->max_ep_message_len / - htc->target_credit_size; - - if (ep->max_ep_message_len % htc->target_credit_size) - ep->tx_credits_per_max_message++; /* copy all the callbacks */ ep->ep_ops = conn_req->ep_ops; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 0c55cd92a951..6ababa345e2b 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -314,8 +314,6 @@ struct ath10k_htc_ep { u8 seq_no; /* for debugging */ int tx_credits; - int tx_credit_size; - int tx_credits_per_max_message; bool tx_credit_flow_enabled; }; @@ -339,7 +337,6 @@ struct ath10k_htc { struct completion ctl_resp; int total_transmit_credits; - struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; }; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 86d082cf4eef..2743a9bcd556 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -702,6 +702,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, /* 80MHZ */ case 2: status->vht_flag |= RX_VHT_FLAG_80MHZ; + break; + case 3: + status->vht_flag |= RX_VHT_FLAG_160MHZ; + break; } status->flag |= RX_FLAG_VHT; @@ -926,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar, *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), @@ -940,6 +944,7 @@ static void ath10k_process_rx(struct ath10k *ar, status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? "40" : "", status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "", + status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "", status->flag & RX_FLAG_SHORT_GI ? 
"sgi " : "", status->rate_idx, status->vht_nss, @@ -2231,6 +2236,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, return; } + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + if (txrate.flags == WMI_RATE_PREAMBLE_CCK || txrate.flags == WMI_RATE_PREAMBLE_OFDM) { rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); @@ -2245,7 +2252,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, rate *= 10; if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK) rate = rate - 5; - arsta->txrate.legacy = rate * 10; + arsta->txrate.legacy = rate; } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { arsta->txrate.flags = RATE_INFO_FLAGS_MCS; arsta->txrate.mcs = txrate.mcs; @@ -2451,8 +2458,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) u32 phymode = __le32_to_cpu(resp->chan_change.phymode); u32 freq = __le32_to_cpu(resp->chan_change.freq); - ar->tgt_oper_chan = - __ieee80211_get_channel(ar->hw->wiphy, freq); + ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt chan change freq %u phymode %s\n", freq, ath10k_wmi_phymode_str(phymode)); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 27e49db4287a..86b427f5e2bc 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -239,6 +239,7 @@ static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt) size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr); + htt->txbuf.vaddr = NULL; } static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt) @@ -268,6 +269,7 @@ static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) size, htt->frag_desc.vaddr, htt->frag_desc.paddr); + htt->frag_desc.vaddr = NULL; } static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 883547f3347c..38aa7c95732e 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -512,7 +512,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 #define TARGET_TLV_NUM_STATIONS 32 -#define TARGET_TLV_NUM_PEERS 35 +#define TARGET_TLV_NUM_PEERS 33 #define TARGET_TLV_NUM_TDLS_VDEVS 1 #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) @@ -578,6 +578,9 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_IPHDR_PAD_CONFIG 1 #define TARGET_10_4_QWRAP_CONFIG 0 +/* Maximum number of Copy Engine's supported */ +#define CE_COUNT_MAX 12 + /* Number of Copy Engines supported */ #define CE_COUNT ar->hw_values->ce_count diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index aa545a1dbdc7..9977829a6ec4 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -569,10 +569,14 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_80: phymode = MODE_11AC_VHT80; break; + case NL80211_CHAN_WIDTH_160: + phymode = MODE_11AC_VHT160; + break; + case NL80211_CHAN_WIDTH_80P80: + phymode = MODE_11AC_VHT80_80; + break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: phymode = MODE_UNKNOWN; break; } @@ -971,6 +975,7 @@ static int 
ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; /* TODO setup this dynamically, what in case we don't have any vifs? */ @@ -1227,6 +1232,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar) return ath10k_monitor_stop(ar); } +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (!arvif->is_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); + return false; + } + + return true; +} + +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->protection_mode; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", + arvif->vdev_id, arvif->use_cts_prot); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->use_cts_prot ? 1 : 0); +} + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -1245,6 +1280,9 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, WMI_RTSCTS_PROFILE); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", + arvif->vdev_id, rts_cts); + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rts_cts); } @@ -1384,6 +1422,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, arg.channel.freq = chandef->chan->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = chan_to_phymode(chandef); arg.channel.min_power = 0; @@ -2447,6 +2486,9 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, if (sta->bandwidth == IEEE80211_STA_RX_BW_80) arg->peer_flags |= ar->wmi.peer_flags->bw80; + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + arg->peer_flags |= ar->wmi.peer_flags->bw160; + arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->peer_vht_rates.rx_mcs_set = @@ -2500,6 +2542,33 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) ATH10K_MAC_FIRST_OFDM_RATE_IDX; } +static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, + struct ieee80211_sta *sta) +{ + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { + switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: + return MODE_11AC_VHT160; + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: + return MODE_11AC_VHT80_80; + default: + /* not sure if this is a valid case? 
*/ + return MODE_11AC_VHT160; + } + } + + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11AC_VHT80; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11AC_VHT40; + + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11AC_VHT20; + + return MODE_UNKNOWN; +} + static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2546,12 +2615,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, */ if (sta->vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) - phymode = MODE_11AC_VHT80; - else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) - phymode = MODE_11AC_VHT40; - else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) - phymode = MODE_11AC_VHT20; + phymode = ath10k_mac_get_phymode_vht(ar, sta); } else if (sta->ht_cap.ht_supported && !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) @@ -3495,7 +3559,6 @@ static int ath10k_mac_tx_submit(struct ath10k *ar, */ static int ath10k_mac_tx(struct ath10k *ar, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, enum ath10k_hw_txrx_mode txmode, enum ath10k_mac_tx_path txpath, struct sk_buff *skb) @@ -3637,7 +3700,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", ret); @@ -3742,6 +3805,9 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, if (!peer) return NULL; + if (peer->removed) + return NULL; + if (peer->sta) return peer->sta->txq[tid]; else if (peer->vif) @@ -3824,7 +3890,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (unlikely(ret)) { ath10k_warn(ar, "failed to push frame: %d\n", ret); @@ -4105,7 +4171,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); if (ret) { ath10k_warn(ar, "failed to transmit frame: %d\n", ret); if (is_htt) { @@ -4279,6 +4345,13 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) vht_cap.cap |= val; } + /* Currently the firmware seems to be buggy, don't enable 80+80 + * mode until that's resolved. 
+ */ + if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && + !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) + vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + mcs_map = 0; for (i = 0; i < 8; i++) { if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) @@ -4669,7 +4742,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - WARN_ON(arvif->txpower < 0); + if (arvif->txpower <= 0) + continue; if (txpower == -1) txpower = arvif->txpower; @@ -4677,8 +4751,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) txpower = min(txpower, arvif->txpower); } - if (WARN_ON(txpower == -1)) - return -EINVAL; + if (txpower == -1) + return 0; ret = ath10k_mac_txpower_setup(ar, txpower); if (ret) { @@ -5194,6 +5268,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + spin_lock_bh(&ar->htt.tx_lock); ath10k_mac_vif_tx_unlock_all(arvif); spin_unlock_bh(&ar->htt.tx_lock); @@ -5328,20 +5406,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", - arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); - vdev_param = ar->wmi.vdev_param->protection_mode; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, - info->use_cts_prot ? 1 : 0); - if (ret) - ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", - info->use_cts_prot, arvif->vdev_id, ret); + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -6934,6 +7010,9 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: + bw = WMI_PEER_CHWIDTH_160MHZ; + break; + default: ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", sta->bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; @@ -7364,6 +7443,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_up = true; } + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); return 0; @@ -7434,6 +7520,20 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, return 0; } +static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath10k *ar; + struct ath10k_peer *peer; + + ar = hw->priv; + + list_for_each_entry(peer, &ar->peers, list) + if (peer->sta == sta) + peer->removed = true; +} + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_mac_op_tx, .wake_tx_queue = ath10k_mac_op_wake_tx_queue, @@ -7474,6 +7574,7 @@ static const struct ieee80211_ops ath10k_ops = { .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, + .sta_pre_rcu_remove = 
ath10k_mac_op_sta_pre_rcu_remove, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) @@ -7548,6 +7649,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { CHAN5G(157, 5785, 0), CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), + CHAN5G(169, 5845, 0), }; struct ath10k *ath10k_mac_create(size_t priv_size) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index b541a1c74488..79e61459bc6c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -896,7 +896,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, */ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); - data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, + data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, GFP_ATOMIC); @@ -905,7 +905,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ret = -ENOMEM; goto done; } - memset(data_buf, 0, alloc_nbytes); remaining_bytes = nbytes; ce_data = ce_data_base; @@ -1474,6 +1473,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); + ath10k_ce_dump_registers(ar, crash_data); spin_unlock_bh(&ar->data_lock); @@ -1937,7 +1937,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) { u32 addr, val; - addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; val = ath10k_pci_read32(ar, addr); val |= CORE_CTRL_CPU_INTR_MASK; ath10k_pci_write32(ar, addr, val); @@ -1973,7 +1973,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) } break; case QCA9377_1_0_DEVICE_ID: - return 2; + return 4; } ath10k_warn(ar, "unknown number of banks, assuming 1\n"); @@ -2799,7 +2799,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) done = ath10k_htt_txrx_compl_task(ar, budget); if (done < budget) { - napi_complete(ctx); + napi_complete_done(ctx, done); /* In case of MSI, it is possible that interrupts are received * while NAPI poll is inprogress. 
So pending interrupts that are * received after processing all copy engine pipes by NAPI poll @@ -3132,7 +3132,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar) setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); - if (QCA_REV_6174(ar)) + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); ret = ath10k_pci_alloc_pipes(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 9854ad56b2de..c76789d5de99 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -25,11 +25,6 @@ #include "ahb.h" /* - * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite - */ -#define DIAG_TRANSFER_LIMIT 2048 - -/* * maximum number of bytes that can be * handled atomically by DiagRead/DiagWrite */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index f304f6632c4f..f9188027a6f6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_pdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_vdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_peer *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -3631,6 +3637,7 @@ static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { .vht = WMI_TLV_PEER_VHT, .bw80 = WMI_TLV_PEER_80MHZ, .pmf = WMI_TLV_PEER_PMF, + .bw160 = WMI_TLV_PEER_160MHZ, }; /************/ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index b8aa6000573c..22cf011e839a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -543,6 +543,7 @@ enum wmi_tlv_peer_flags { WMI_TLV_PEER_VHT = 0x02000000, WMI_TLV_PEER_80MHZ = 0x04000000, WMI_TLV_PEER_PMF = 0x08000000, + WMI_TLV_PEER_160MHZ = 0x20000000, }; enum wmi_tlv_tag { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 50d6ee6afe26..414ad3e1eed4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -28,6 +28,7 @@ #include "wmi-ops.h" #include "p2p.h" #include "hw.h" +#include "hif.h" #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) @@ -1574,6 +1575,7 @@ static const struct wmi_peer_flags_map wmi_peer_flags_map = { .bw80 = WMI_PEER_80MHZ, .vht_2g = WMI_PEER_VHT_2G, .pmf = WMI_PEER_PMF, + .bw160 = WMI_PEER_160MHZ, }; static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { @@ -1591,6 +1593,7 @@ static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = { .spatial_mux = WMI_10X_PEER_SPATIAL_MUX, .vht = WMI_10X_PEER_VHT, .bw80 = WMI_10X_PEER_80MHZ, + .bw160 = WMI_10X_PEER_160MHZ, }; static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = { @@ -1610,6 +1613,7 @@ static const struct 
wmi_peer_flags_map wmi_10_2_peer_flags_map = { .bw80 = WMI_10_2_PEER_80MHZ, .vht_2g = WMI_10_2_PEER_VHT_2G, .pmf = WMI_10_2_PEER_PMF, + .bw160 = WMI_10_2_PEER_160MHZ, }; void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, @@ -1634,7 +1638,10 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, ch->mhz = __cpu_to_le32(arg->freq); ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1); - ch->band_center_freq2 = 0; + if (arg->mode == MODE_11AC_VHT80_80) + ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2); + else + ch->band_center_freq2 = 0; ch->min_power = arg->min_power; ch->max_power = arg->max_power; ch->reg_power = arg->max_reg_power; @@ -2319,7 +2326,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) */ if (channel >= 1 && channel <= 14) { status->band = NL80211_BAND_2GHZ; - } else if (channel >= 36 && channel <= 165) { + } else if (channel >= 36 && channel <= 169) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 5d3dff95b2e5..861c2d8c6e8c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1728,8 +1728,10 @@ enum wmi_phy_mode { MODE_11AC_VHT20_2G = 11, MODE_11AC_VHT40_2G = 12, MODE_11AC_VHT80_2G = 13, - MODE_UNKNOWN = 14, - MODE_MAX = 14 + MODE_11AC_VHT80_80 = 14, + MODE_11AC_VHT160 = 15, + MODE_UNKNOWN = 16, + MODE_MAX = 16 }; static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) @@ -1757,6 +1759,10 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) return "11ac-vht40"; case MODE_11AC_VHT80: return "11ac-vht80"; + case MODE_11AC_VHT160: + return "11ac-vht160"; + case MODE_11AC_VHT80_80: + return "11ac-vht80+80"; case MODE_11AC_VHT20_2G: return "11ac-vht20-2g"; case MODE_11AC_VHT40_2G: @@ -1811,6 +1817,7 @@ struct wmi_channel { struct wmi_channel_arg { u32 freq; u32 band_center_freq1; + u32 band_center_freq2; bool passive; bool allow_ibss; bool allow_ht; @@ -1875,9 +1882,18 @@ enum wmi_channel_change_cause { #define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 #define WMI_VHT_CAP_RX_LDPC 0x00000010 #define WMI_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_VHT_CAP_SGI_160MHZ 0x00000040 #define WMI_VHT_CAP_TX_STBC 0x00000080 #define WMI_VHT_CAP_RX_STBC_MASK 0x00000300 #define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_VHT_CAP_SU_BFER 0x00000800 +#define WMI_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_VHT_CAP_MU_BFER 0x00080000 +#define WMI_VHT_CAP_MU_BFEE 0x00100000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23 #define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000 @@ -1926,6 +1942,8 @@ enum { REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */ + REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */ + REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */ REGDMN_MODE_ALL = 0xffffffff }; @@ -5783,6 +5801,7 @@ enum wmi_peer_chwidth { WMI_PEER_CHWIDTH_20MHZ = 0, WMI_PEER_CHWIDTH_40MHZ = 1, WMI_PEER_CHWIDTH_80MHZ = 2, + WMI_PEER_CHWIDTH_160MHZ = 3, }; enum wmi_peer_param { @@ -5873,6 +5892,7 @@ struct wmi_peer_flags_map { 
u32 bw80; u32 vht_2g; u32 pmf; + u32 bw160; }; enum wmi_peer_flags { @@ -5892,6 +5912,7 @@ enum wmi_peer_flags { WMI_PEER_80MHZ = 0x04000000, WMI_PEER_VHT_2G = 0x08000000, WMI_PEER_PMF = 0x10000000, + WMI_PEER_160MHZ = 0x20000000 }; enum wmi_10x_peer_flags { @@ -5909,6 +5930,7 @@ enum wmi_10x_peer_flags { WMI_10X_PEER_SPATIAL_MUX = 0x00200000, WMI_10X_PEER_VHT = 0x02000000, WMI_10X_PEER_80MHZ = 0x04000000, + WMI_10X_PEER_160MHZ = 0x20000000 }; enum wmi_10_2_peer_flags { @@ -5928,6 +5950,7 @@ enum wmi_10_2_peer_flags { WMI_10_2_PEER_80MHZ = 0x04000000, WMI_10_2_PEER_VHT_2G = 0x08000000, WMI_10_2_PEER_PMF = 0x10000000, + WMI_10_2_PEER_160MHZ = 0x20000000 }; /* diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index dc44cfef7517..16e052d02c94 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; return -EOPNOTSUPP; default: - WARN_ON(1); - return -EINVAL; + return -EOPNOTSUPP; } mutex_lock(&ah->lock); diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 8ec66e74d06d..2195b1b7a8a6 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -713,7 +713,7 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) * that the packet is properly freed? */ if (s_req->busrequest) { - s_req->busrequest->scat_req = 0; + s_req->busrequest->scat_req = NULL; ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); } kfree(s_req->virt_dma_buf); diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 8f231c67dd51..783a38f1a626 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -3,8 +3,8 @@ config ATH9K_HW config ATH9K_COMMON tristate select ATH_COMMON - select DEBUG_FS - select RELAY +config ATH9K_COMMON_DEBUG + bool config ATH9K_DFS_DEBUGFS def_bool y depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED @@ -60,12 +60,14 @@ config ATH9K_DEBUGFS bool "Atheros ath9k debugging" depends on ATH9K && DEBUG_FS select MAC80211_DEBUGFS + select ATH9K_COMMON_DEBUG select RELAY ---help--- Say Y, if you need access to ath9k's statistics for interrupts, rate control, etc. Also required for changing debug message flags at run time. + This also provides access to the FFT/spectral data and TX99. config ATH9K_STATION_STATISTICS bool "Detailed station statistics" @@ -174,8 +176,11 @@ config ATH9K_HTC config ATH9K_HTC_DEBUGFS bool "Atheros ath9k_htc debugging" depends on ATH9K_HTC && DEBUG_FS + select ATH9K_COMMON_DEBUG + select RELAY ---help--- Say Y, if you need access to ath9k_htc's statistics. + This also provides access to the FFT/spectral data. 
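A note on the ATH9K_COMMON_DEBUG rework in the Kconfig hunk above: ATH9K_COMMON no longer force-selects DEBUG_FS and RELAY for every ath9k user. The debugfs options instead select the new hidden ATH9K_COMMON_DEBUG symbol, the Makefile hunk below compiles common-debug.o and common-spectral.o only when that symbol is set, and the common-debug.h/common-spectral.h hunks further down keep callers building by swapping the declarations for empty static inline stubs. A minimal sketch of that compile-out pattern, shown here for one helper (the complete stubs appear verbatim in those header hunks):

#ifdef CONFIG_ATH9K_COMMON_DEBUG
/* Built from common-debug.c when a debugfs option selects the symbol. */
void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
			  struct ath_rx_stats *rxstats);
#else
/* Debug support compiled out: each call collapses to an empty static
 * inline, so callers need no #ifdef of their own and no reference to
 * the debug objects survives in the resulting binary.
 */
static inline void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
					struct ath_rx_stats *rxstats)
{
}
#endif /* CONFIG_ATH9K_COMMON_DEBUG */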
config ATH9K_HWRNG bool "Random number generator support" diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 76f9dc37500b..36a40ffdce15 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -60,8 +60,9 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o ath9k_common-y:= common.o \ common-init.o \ common-beacon.o \ - common-debug.o \ - common-spectral.o + +ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \ + common-spectral.o ath9k_htc-y += htc_hst.o \ hif_usb.o \ diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 8eea8d22e72e..7922550c2159 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -524,7 +524,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah, return true; /* Setup rf parameters */ - eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); + eepMinorRev = ah->eep_ops->get_eeprom_rev(ah); for (i = 0; i < ah->iniBank6.ia_rows; i++) ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index d480d2f3e185..ae68f674829b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -108,8 +108,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) { u32 rxgain_type; - if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= - AR5416_EEP_MINOR_VER_17) { + if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_17) { rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE); if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) @@ -129,8 +128,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type) { - if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= - AR5416_EEP_MINOR_VER_19) { + if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) { if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9280Modes_high_power_tx_gain_9280_2); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index f816909d9474..4b3c9b108197 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ads->ds_txstatus6 = ads->ds_txstatus7 = 0; ads->ds_txstatus8 = ads->ds_txstatus9 = 0; - ACCESS_ONCE(ads->ds_link) = i->link; - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; + WRITE_ONCE(ads->ds_link, i->link); + WRITE_ONCE(ads->ds_data, i->buf_addr[0]); ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); ctl6 = SM(i->keytype, AR_EncrType); @@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) if ((i->is_first || i->is_last) && i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) + WRITE_ONCE(ads->ds_ctl2, set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? 
AR_DurUpdateEna : 0) - | SM(0, AR_BurstDur); + | SM(0, AR_BurstDur)); - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) + WRITE_ONCE(ads->ds_ctl3, set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) - | set11nRate(i->rates, 3); + | set11nRate(i->rates, 3)); } else { - ACCESS_ONCE(ads->ds_ctl2) = 0; - ACCESS_ONCE(ads->ds_ctl3) = 0; + WRITE_ONCE(ads->ds_ctl2, 0); + WRITE_ONCE(ads->ds_ctl3, 0); } if (!i->is_first) { - ACCESS_ONCE(ads->ds_ctl0) = 0; - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + WRITE_ONCE(ads->ds_ctl0, 0); + WRITE_ONCE(ads->ds_ctl1, ctl1); + WRITE_ONCE(ads->ds_ctl6, ctl6); return; } @@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) break; } - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + WRITE_ONCE(ads->ds_ctl0, (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) @@ -287,29 +287,29 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : - (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0))); - ACCESS_ONCE(ads->ds_ctl1) = ctl1; - ACCESS_ONCE(ads->ds_ctl6) = ctl6; + WRITE_ONCE(ads->ds_ctl1, ctl1); + WRITE_ONCE(ads->ds_ctl6, ctl6); if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) return; - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) - | set11nPktDurRTSCTS(i->rates, 1); + WRITE_ONCE(ads->ds_ctl4, set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1)); - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) - | set11nPktDurRTSCTS(i->rates, 3); + WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3)); - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) - | SM(i->rtscts_rate, AR_RTSCTSRate); + | SM(i->rtscts_rate, AR_RTSCTSRate)); - ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3); + WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1)); + WRITE_ONCE(ads->ds_ctl10, SM(i->txpower[2], AR_XmitPower2)); + WRITE_ONCE(ads->ds_ctl11, SM(i->txpower[3], AR_XmitPower3)); } static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, @@ -318,7 +318,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, struct ar5416_desc *ads = AR5416DESC(ds); u32 status; - status = ACCESS_ONCE(ads->ds_txstatus9); + status = READ_ONCE(ads->ds_txstatus9); if ((status & AR_TxDone) == 0) return -EINPROGRESS; @@ -332,7 +332,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_rateindex = MS(status, AR_FinalTxIdx); ts->ts_seqnum = MS(status, AR_SeqNum); - status = ACCESS_ONCE(ads->ds_txstatus0); + status = READ_ONCE(ads->ds_txstatus0); ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00); ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01); ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02); @@ -342,7 +342,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ba_high = ads->AR_BaBitmapHigh; } - status = ACCESS_ONCE(ads->ds_txstatus1); + status = 
READ_ONCE(ads->ds_txstatus1); if (status & AR_FrmXmitOK) ts->ts_status |= ATH9K_TX_ACKED; else { @@ -371,7 +371,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_longretry = MS(status, AR_DataFailCnt); ts->ts_virtcol = MS(status, AR_VirtRetryCnt); - status = ACCESS_ONCE(ads->ds_txstatus5); + status = READ_ONCE(ads->ds_txstatus5); ts->ts_rssi = MS(status, AR_TxRSSICombined); ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10); ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11); @@ -390,13 +390,13 @@ static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index) switch (index) { case 0: - return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0); + return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur0); case 1: - return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1); + return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur1); case 2: - return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2); + return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur2); case 3: - return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3); + return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur3); default: return -1; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 08607d7fdb56..3dbfd86ebe36 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -53,7 +53,7 @@ static const struct ar9300_eeprom ar9300_default = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -631,7 +631,7 @@ static const struct ar9300_eeprom ar9300_x113 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -1210,7 +1210,7 @@ static const struct ar9300_eeprom ar9300_h112 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -1789,7 +1789,7 @@ static const struct ar9300_eeprom ar9300_x112 = { .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -2367,7 +2367,7 @@ static const struct ar9300_eeprom ar9300_h116 = { .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, - .eepMisc = 0, + .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN, }, .rfSilent = 0, .blueToothOptions = 0, @@ -3468,7 +3468,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01)); + PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & + AR5416_EEPMISC_BIG_ENDIAN)); PR_EEP("RF Silent", pBase->rfSilent); PR_EEP("BT option", pBase->blueToothOptions); PR_EEP("Device Cap", pBase->deviceCap); @@ -5497,6 +5498,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, } } +static u8 ar9003_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map4k.baseEepHeader.eepMisc; +} + const struct eeprom_ops eep_ar9300_ops = { .check_eeprom = ath9k_hw_ar9300_check_eeprom, .get_eeprom = 
ath9k_hw_ar9300_get_eeprom, @@ -5507,5 +5513,6 @@ const struct eeprom_ops eep_ar9300_ops = { .set_board_values = ath9k_hw_ar9300_set_board_values, .set_addac = ath9k_hw_ar9300_set_addac, .set_txpower = ath9k_hw_ar9300_set_txpower, - .get_spur_channel = ath9k_hw_ar9300_get_spur_channel + .get_spur_channel = ath9k_hw_ar9300_get_spur_channel, + .get_eepmisc = ar9003_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 107bcfbbe0fb..7dc7205dc877 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -38,7 +38,6 @@ #define AR9300_NUM_CTLS_2G 12 #define AR9300_NUM_BAND_EDGES_5G 8 #define AR9300_NUM_BAND_EDGES_2G 4 -#define AR9300_EEPMISC_BIG_ENDIAN 0x01 #define AR9300_EEPMISC_WOW 0x02 #define AR9300_CUSTOMER_DATA_SIZE 20 @@ -70,6 +69,9 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff +/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */ +#define AR9300_EEPMISC_LITTLE_ENDIAN 0 + #define AR9300_OTP_BASE \ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) #define AR9300_OTP_STATUS \ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index da84b705cbcd..cc5bb0a76baf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) (i->qcu << AR_TxQcuNum_S) | desc_len; checksum += val; - ACCESS_ONCE(ads->info) = val; + WRITE_ONCE(ads->info, val); checksum += i->link; - ACCESS_ONCE(ads->link) = i->link; + WRITE_ONCE(ads->link, i->link); checksum += i->buf_addr[0]; - ACCESS_ONCE(ads->data0) = i->buf_addr[0]; + WRITE_ONCE(ads->data0, i->buf_addr[0]); checksum += i->buf_addr[1]; - ACCESS_ONCE(ads->data1) = i->buf_addr[1]; + WRITE_ONCE(ads->data1, i->buf_addr[1]); checksum += i->buf_addr[2]; - ACCESS_ONCE(ads->data2) = i->buf_addr[2]; + WRITE_ONCE(ads->data2, i->buf_addr[2]); checksum += i->buf_addr[3]; - ACCESS_ONCE(ads->data3) = i->buf_addr[3]; + WRITE_ONCE(ads->data3, i->buf_addr[3]); checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl3) = val; + WRITE_ONCE(ads->ctl3, val); checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl5) = val; + WRITE_ONCE(ads->ctl5, val); checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl7) = val; + WRITE_ONCE(ads->ctl7, val); checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); - ACCESS_ONCE(ads->ctl9) = val; + WRITE_ONCE(ads->ctl9, val); checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); - ACCESS_ONCE(ads->ctl10) = checksum; + WRITE_ONCE(ads->ctl10, checksum); if (i->is_first || i->is_last) { - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) + WRITE_ONCE(ads->ctl13, set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? 
AR_DurUpdateEna : 0) - | SM(0, AR_BurstDur); + | SM(0, AR_BurstDur)); - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) + WRITE_ONCE(ads->ctl14, set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) - | set11nRate(i->rates, 3); + | set11nRate(i->rates, 3)); } else { - ACCESS_ONCE(ads->ctl13) = 0; - ACCESS_ONCE(ads->ctl14) = 0; + WRITE_ONCE(ads->ctl13, 0); + WRITE_ONCE(ads->ctl14, 0); } ads->ctl20 = 0; @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ctl17 = SM(i->keytype, AR_EncrType); if (!i->is_first) { - ACCESS_ONCE(ads->ctl11) = 0; - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; - ACCESS_ONCE(ads->ctl15) = 0; - ACCESS_ONCE(ads->ctl16) = 0; - ACCESS_ONCE(ads->ctl17) = ctl17; - ACCESS_ONCE(ads->ctl18) = 0; - ACCESS_ONCE(ads->ctl19) = 0; + WRITE_ONCE(ads->ctl11, 0); + WRITE_ONCE(ads->ctl12, i->is_last ? 0 : AR_TxMore); + WRITE_ONCE(ads->ctl15, 0); + WRITE_ONCE(ads->ctl16, 0); + WRITE_ONCE(ads->ctl17, ctl17); + WRITE_ONCE(ads->ctl18, 0); + WRITE_ONCE(ads->ctl19, 0); return; } - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) + WRITE_ONCE(ads->ctl11, (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower[0], AR_XmitPower0) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) @@ -107,7 +107,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0) | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : - (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0))); ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0) @@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; ctl12 |= SM(val, AR_PAPRDChainMask); - ACCESS_ONCE(ads->ctl12) = ctl12; - ACCESS_ONCE(ads->ctl17) = ctl17; + WRITE_ONCE(ads->ctl12, ctl12); + WRITE_ONCE(ads->ctl17, ctl17); - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) - | set11nPktDurRTSCTS(i->rates, 1); + WRITE_ONCE(ads->ctl15, set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1)); - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) - | set11nPktDurRTSCTS(i->rates, 3); + WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3)); - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) + WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) - | SM(i->rtscts_rate, AR_RTSCTSRate); + | SM(i->rtscts_rate, AR_RTSCTSRate)); - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; + WRITE_ONCE(ads->ctl19, AR_Not_Sounding); - ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1); - ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2); - ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3); + WRITE_ONCE(ads->ctl20, SM(i->txpower[1], AR_XmitPower1)); + WRITE_ONCE(ads->ctl21, SM(i->txpower[2], AR_XmitPower2)); + WRITE_ONCE(ads->ctl22, SM(i->txpower[3], AR_XmitPower3)); } static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) @@ -359,7 +359,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ads = &ah->ts_ring[ah->ts_tail]; - status = ACCESS_ONCE(ads->status8); + status = READ_ONCE(ads->status8); if ((status & AR_TxDone) == 0) return -EINPROGRESS; @@ -385,7 +385,7 @@ static int 
ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, if (status & AR_TxOpExceeded) ts->ts_status |= ATH9K_TXERR_XTXOP; - status = ACCESS_ONCE(ads->status2); + status = READ_ONCE(ads->status2); ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00); ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01); ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02); @@ -395,7 +395,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ba_high = ads->status6; } - status = ACCESS_ONCE(ads->status3); + status = READ_ONCE(ads->status3); if (status & AR_ExcessiveRetries) ts->ts_status |= ATH9K_TXERR_XRETRY; if (status & AR_Filtered) @@ -420,7 +420,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_longretry = MS(status, AR_DataFailCnt); ts->ts_virtcol = MS(status, AR_VirtRetryCnt); - status = ACCESS_ONCE(ads->status7); + status = READ_ONCE(ads->status7); ts->ts_rssi = MS(status, AR_TxRSSICombined); ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10); ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11); @@ -437,13 +437,13 @@ static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index) switch (index) { case 0: - return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0); + return MS(READ_ONCE(adc->ctl15), AR_PacketDur0); case 1: - return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1); + return MS(READ_ONCE(adc->ctl15), AR_PacketDur1); case 2: - return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2); + return MS(READ_ONCE(adc->ctl16), AR_PacketDur2); case 3: - return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3); + return MS(READ_ONCE(adc->ctl16), AR_PacketDur3); default: return 0; } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 378d3458fddb..331947b6a667 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -112,6 +112,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_TXFIFO_DEPTH 8 #define ATH_TX_ERROR 0x01 +#define ATH_AIRTIME_QUANTUM 300 /* usec */ + /* Stop tx traffic 1ms before the GO goes away */ #define ATH_P2P_PS_STOP_TIME 1000 @@ -247,6 +249,9 @@ struct ath_atx_tid { bool has_queued; }; +void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); +void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); + struct ath_node { struct ath_softc *sc; struct ieee80211_sta *sta; /* station struct we're part of */ @@ -258,9 +263,12 @@ struct ath_node { bool sleeping; bool no_ps_filter; + s64 airtime_deficit[IEEE80211_NUM_ACS]; + u32 airtime_rx_start; #ifdef CONFIG_ATH9K_STATION_STATISTICS struct ath_rx_rate_stats rx_rate_stats; + struct ath_airtime_stats airtime_stats; #endif u8 key_idx[4]; @@ -317,10 +325,16 @@ struct ath_rx { /* Channel Context */ /*******************/ +struct ath_acq { + struct list_head acq_new; + struct list_head acq_old; + spinlock_t lock; +}; + struct ath_chanctx { struct cfg80211_chan_def chandef; struct list_head vifs; - struct list_head acq[IEEE80211_NUM_ACS]; + struct ath_acq acq[IEEE80211_NUM_ACS]; int hw_queue_base; /* do not dereference, use for comparison only */ @@ -555,6 +569,15 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc, #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ +static inline void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) +{ + spin_lock_bh(&txq->axq_lock); +} +static inline void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) +{ + spin_unlock_bh(&txq->axq_lock); +} + void ath_startrecv(struct ath_softc *sc); bool ath_stoprecv(struct ath_softc *sc); u32 
ath_calcrxfilter(struct ath_softc *sc); @@ -562,8 +585,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs); void ath_rx_cleanup(struct ath_softc *sc); int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); -void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq); -void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq); void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq); void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); bool ath_drain_all_txq(struct ath_softc *sc); @@ -575,6 +596,8 @@ void ath_txq_schedule_all(struct ath_softc *sc); int ath_tx_init(struct ath_softc *sc, int nbufs); int ath_txq_update(struct ath_softc *sc, int qnum, struct ath9k_tx_queue_info *q); +u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, + int width, int half_gi, bool shortPreamble); void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); void ath_assign_seq(struct ath_common *common, struct sk_buff *skb); int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, @@ -963,6 +986,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */ +#define AIRTIME_USE_TX BIT(0) +#define AIRTIME_USE_RX BIT(1) +#define AIRTIME_USE_NEW_QUEUES BIT(2) +#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX))) + struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -1005,6 +1033,8 @@ struct ath_softc { short nbcnvifs; unsigned long ps_usecount; + u16 airtime_flags; /* AIRTIME_* */ + struct ath_rx rx; struct ath_tx tx; struct ath_beacon beacon; diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 929dd70f48eb..b84539d89f1a 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -118,8 +118,11 @@ void ath_chanctx_init(struct ath_softc *sc) INIT_LIST_HEAD(&ctx->vifs); ctx->txpower = ATH_TXPOWER_MAX; ctx->flush_timeout = HZ / 5; /* 200ms */ - for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) - INIT_LIST_HEAD(&ctx->acq[j]); + for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) { + INIT_LIST_HEAD(&ctx->acq[j].acq_new); + INIT_LIST_HEAD(&ctx->acq[j].acq_old); + spin_lock_init(&ctx->acq[j].lock); + } } } @@ -1345,8 +1348,11 @@ void ath9k_offchannel_init(struct ath_softc *sc) ctx->txpower = ATH_TXPOWER_MAX; cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); - for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) - INIT_LIST_HEAD(&ctx->acq[i]); + for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) { + INIT_LIST_HEAD(&ctx->acq[i].acq_new); + INIT_LIST_HEAD(&ctx->acq[i].acq_old); + spin_lock_init(&ctx->acq[i].lock); + } sc->offchannel.chan.offchannel = true; } diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h index 7c9788490f7f..3376990d3a24 100644 --- a/drivers/net/wireless/ath/ath9k/common-debug.h +++ b/drivers/net/wireless/ath/ath9k/common-debug.h @@ -60,6 +60,7 @@ struct ath_rx_stats { u32 rx_spectral; }; +#ifdef CONFIG_ATH9K_COMMON_DEBUG void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy, struct ath_hw *ah); void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy, @@ -70,3 +71,29 @@ void ath9k_cmn_debug_recv(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats); void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy, struct ath_rx_stats *rxstats); +#else +static inline void 
ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy, + struct ath_hw *ah) +{ +} + +static inline void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy, + struct ath_hw *ah) +{ +} + +static inline void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats, + struct ath_rx_status *rs) +{ +} + +static inline void ath9k_cmn_debug_recv(struct dentry *debugfs_phy, + struct ath_rx_stats *rxstats) +{ +} + +static inline void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy, + struct ath_rx_stats *rxstats) +{ +} +#endif /* CONFIG_ATH9K_COMMON_DEBUG */ diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index eedf86b67cf5..789a3dbe8341 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -1075,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) { - if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { + if (spec_priv->rfs_chan_spec_scan) { relay_close(spec_priv->rfs_chan_spec_scan); spec_priv->rfs_chan_spec_scan = NULL; } diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h index 998743be9c67..5d1a51d83aa6 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.h +++ b/drivers/net/wireless/ath/ath9k/common-spectral.h @@ -151,6 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } +#ifdef CONFIG_ATH9K_COMMON_DEBUG void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy); void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv); @@ -161,5 +162,27 @@ int ath9k_cmn_spectral_scan_config(struct ath_common *common, enum spectral_mode spectral_mode); int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr, struct ath_rx_status *rs, u64 tsf); +#else +static inline void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, + struct dentry *debugfs_phy) +{ +} + +static inline void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) +{ +} + +static inline void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, + struct ath_spec_scan_priv *spec_priv) +{ +} + +static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, + struct ieee80211_hdr *hdr, + struct ath_rx_status *rs, u64 tsf) +{ + return 0; +} +#endif /* CONFIG_ATH9K_COMMON_DEBUG */ #endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 89a94dd5f2cb..43930c336987 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1399,5 +1399,8 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("tpc", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_tpc); + debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, &sc->airtime_flags); + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index a078cdd3170d..249f8141cd00 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -221,6 +221,11 @@ struct ath_rx_rate_stats { } cck_stats[4]; }; +struct ath_airtime_stats { + u32 rx_airtime; + u32 tx_airtime; +}; + #define ANT_MAIN 0 #define ANT_ALT 1 @@ -314,12 +319,20 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause) void 
ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb); +void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, u32 tx); #else static inline void ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { } +static inline void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, u32 tx) +{ +} #endif /* CONFIG_ATH9K_STATION_STATISTICS */ #endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index 2a3a3c4671bc..524cbf13ca9c 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -242,6 +242,59 @@ static const struct file_operations fops_node_recv = { .llseek = default_llseek, }; +void ath_debug_airtime(struct ath_softc *sc, + struct ath_node *an, + u32 rx, + u32 tx) +{ + struct ath_airtime_stats *astats = &an->airtime_stats; + + astats->rx_airtime += rx; + astats->tx_airtime += tx; +} + +static ssize_t read_airtime(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_node *an = file->private_data; + struct ath_airtime_stats *astats; + static const char *qname[4] = { + "VO", "VI", "BE", "BK" + }; + u32 len = 0, size = 256; + char *buf; + size_t retval; + int i; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + astats = &an->airtime_stats; + + len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime); + len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime); + len += scnprintf(buf + len, size - len, "Deficit: "); + for (i = 0; i < 4; i++) + len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]); + if (len < size) + buf[len++] = '\n'; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + + +static const struct file_operations fops_airtime = { + .read = read_airtime, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + + void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -251,4 +304,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr); debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv); + debugfs_create_file("airtime", S_IRUGO, dir, an, &fops_airtime); } diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a449588a8009..fb80ec86e53d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -160,6 +160,7 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) u16 magic; u16 *eepdata; int i; + bool needs_byteswap = false; struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { @@ -167,31 +168,40 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) return -EIO; } - *swap_needed = false; if (swab16(magic) == AR5416_EEPROM_MAGIC) { + needs_byteswap = true; + ath_dbg(common, EEPROM, + "EEPROM needs byte-swapping to correct endianness.\n"); + } else if (magic != AR5416_EEPROM_MAGIC) { + if (ath9k_hw_use_flash(ah)) { + ath_dbg(common, EEPROM, + "Ignoring invalid EEPROM magic (0x%04x).\n", + magic); + } else { + ath_err(common, + "Invalid EEPROM magic (0x%04x).\n", magic); + return -EINVAL; + } + } + + if (needs_byteswap) 
{ if (ah->ah_flags & AH_NO_EEP_SWAP) { ath_info(common, "Ignoring endianness difference in EEPROM magic bytes.\n"); } else { - *swap_needed = true; - } - } else if (magic != AR5416_EEPROM_MAGIC) { - if (ath9k_hw_use_flash(ah)) - return 0; + eepdata = (u16 *)(&ah->eeprom); - ath_err(common, - "Invalid EEPROM Magic (0x%04x).\n", magic); - return -EINVAL; + for (i = 0; i < size; i++) + eepdata[i] = swab16(eepdata[i]); + } } - eepdata = (u16 *)(&ah->eeprom); - - if (*swap_needed) { + if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) { + *swap_needed = true; ath_dbg(common, EEPROM, - "EEPROM Endianness is not native.. Changing.\n"); - - for (i = 0; i < size; i++) - eepdata[i] = swab16(eepdata[i]); + "Big Endian EEPROM detected according to EEPMISC register.\n"); + } else { + *swap_needed = false; } return 0; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 4465c6566f20..30bf722e33ed 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -23,6 +23,17 @@ #include <net/cfg80211.h> #include "ar9003_eeprom.h" +/* helpers to swap EEPROM fields, which are stored as __le16 or __le32. Since + * we are 100% sure about it we __force these to u16/u32 for the swab calls to + * silence the sparse checks. These macros are used when we have a Big Endian + * EEPROM (according to AR5416_EEPMISC_BIG_ENDIAN) and need to convert the + * fields to __le16/__le32. + */ +#define EEPROM_FIELD_SWAB16(field) \ + (field = (__force __le16)swab16((__force u16)field)) +#define EEPROM_FIELD_SWAB32(field) \ + (field = (__force __le32)swab32((__force u32)field)) + #ifdef __BIG_ENDIAN #define AR5416_EEPROM_MAGIC 0x5aa5 #else @@ -99,7 +110,6 @@ #define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x)) #define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) -#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) #define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) #define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \ @@ -121,6 +131,8 @@ #define AR5416_EEP_NO_BACK_VER 0x1 #define AR5416_EEP_VER 0xE +#define AR5416_EEP_VER_MAJOR_SHIFT 12 +#define AR5416_EEP_VER_MAJOR_MASK 0xF000 #define AR5416_EEP_VER_MINOR_MASK 0x0FFF #define AR5416_EEP_MINOR_VER_2 0x2 #define AR5416_EEP_MINOR_VER_3 0x3 @@ -161,6 +173,9 @@ #define AR5416_EEP_TXGAIN_ORIGINAL 0 #define AR5416_EEP_TXGAIN_HIGH_POWER 1 +/* Endianness of EEPROM content */ +#define AR5416_EEPMISC_BIG_ENDIAN 0x01 + #define AR5416_EEP4K_START_LOC 64 #define AR5416_EEP4K_NUM_2G_CAL_PIERS 3 #define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3 @@ -174,7 +189,6 @@ #define AR9280_TX_GAIN_TABLE_SIZE 22 #define AR9287_EEP_VER 0xE -#define AR9287_EEP_VER_MINOR_MASK 0xFFF #define AR9287_EEP_MINOR_VER_1 0x1 #define AR9287_EEP_MINOR_VER_2 0x2 #define AR9287_EEP_MINOR_VER_3 0x3 @@ -191,7 +205,6 @@ #define AR9287_NUM_CTLS 12 #define AR9287_NUM_BAND_EDGES 4 #define AR9287_PD_GAIN_ICEPTS 1 -#define AR9287_EEPMISC_BIG_ENDIAN 0x01 #define AR9287_EEPMISC_WOW 0x02 #define AR9287_MAX_CHAINS 2 #define AR9287_ANT_16S 32 @@ -228,7 +241,6 @@ enum eeprom_param { EEP_DB_5, EEP_OB_2, EEP_DB_2, - EEP_MINOR_REV, EEP_TX_MASK, EEP_RX_MASK, EEP_FSTCLK_5G, @@ -269,19 +281,19 @@ enum ath9k_hal_freq_band { }; struct base_eep_header { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 
rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 pwdclkind; u8 fastClk5g; @@ -299,33 +311,33 @@ struct base_eep_header { } __packed; struct base_eep_header_4k { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 txGainType; } __packed; struct spur_chan { - u16 spurChan; + __le16 spurChan; u8 spurRangeLow; u8 spurRangeHigh; } __packed; struct modal_eep_header { - u32 antCtrlChain[AR5416_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR5416_MAX_CHAINS]; + __le32 antCtrlCommon; u8 antennaGainCh[AR5416_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR5416_MAX_CHAINS]; @@ -360,7 +372,7 @@ struct modal_eep_header { u8 db_ch1; u8 lna_ctl; u8 miscBits; - u16 xpaBiasLvlFreq[3]; + __le16 xpaBiasLvlFreq[3]; u8 futureModal[6]; struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS]; @@ -374,8 +386,8 @@ struct calDataPerFreqOpLoop { } __packed; struct modal_eep_4k_header { - u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS]; + __le32 antCtrlCommon; u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS]; @@ -439,19 +451,19 @@ struct modal_eep_4k_header { } __packed; struct base_eep_ar9287_header { - u16 length; - u16 checksum; - u16 version; + __le16 length; + __le16 checksum; + __le16 version; u8 opCapFlags; u8 eepMisc; - u16 regDmn[2]; + __le16 regDmn[2]; u8 macAddr[6]; u8 rxMask; u8 txMask; - u16 rfSilent; - u16 blueToothOptions; - u16 deviceCap; - u32 binBuildNumber; + __le16 rfSilent; + __le16 blueToothOptions; + __le16 deviceCap; + __le32 binBuildNumber; u8 deviceType; u8 openLoopPwrCntl; int8_t pwrTableOffset; @@ -461,8 +473,8 @@ struct base_eep_ar9287_header { } __packed; struct modal_eep_ar9287_header { - u32 antCtrlChain[AR9287_MAX_CHAINS]; - u32 antCtrlCommon; + __le32 antCtrlChain[AR9287_MAX_CHAINS]; + __le32 antCtrlCommon; int8_t antennaGainCh[AR9287_MAX_CHAINS]; u8 switchSettling; u8 txRxAttenCh[AR9287_MAX_CHAINS]; @@ -653,6 +665,7 @@ struct eeprom_ops { u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test); u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); + u8 (*get_eepmisc)(struct ath_hw *ah); }; void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 5da0826bf1be..b8c0a08066a0 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -20,12 +20,17 @@ static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { - return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); + u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version); + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { - return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); + u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version); + + return version & AR5416_EEP_VER_MINOR_MASK; } #define SIZE_EEPROM_4K (sizeof(struct 
ar5416_eeprom_4k) / sizeof(u16)) @@ -67,12 +72,12 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_4k_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]); @@ -127,6 +132,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -136,12 +142,12 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Major Version", ath9k_hw_4k_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_4k_get_eeprom_rev(ah)); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -154,10 +160,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); + PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("TX Gain type", pBase->txGainType); len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", @@ -189,54 +195,31 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_4K); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - u32 integer; - u16 word; - - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = 
swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = swab16(eep->baseEepHeader.rfSilent); - eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; - - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; - - integer = swab32(eep->modalHeader.antCtrlCommon); - eep->modalHeader.antCtrlCommon = integer; - - for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { - integer = swab32(eep->modalHeader.antCtrlChain[i]); - eep->modalHeader.antCtrlChain[i] = integer; - } - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(eep->modalHeader.spurChans[i].spurChan); - eep->modalHeader.spurChans[i].spurChan = word; - } + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon); + + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + eep->modalHeader.spurChans[i].spurChan); } if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER, @@ -254,9 +237,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; - u16 ver_minor; - - ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: @@ -268,19 +248,17 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; + return le16_to_cpu(pBase->rfSilent); case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; - case EEP_MINOR_REV: - return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: @@ -319,14 +297,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, xpdMask = pEepData->modalHeader.xpdGain; - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; - } else { + else pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); - } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; @@ -612,10 +588,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; - } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, @@ -728,15 +702,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah, { ENABLE_REG_RMW_BUFFER(ah); REG_RMW(ah, 
AR_PHY_SWITCH_CHAIN_0, - pModal->antCtrlChain[0], 0); + le32_to_cpu(pModal->antCtrlChain[0]), 0); REG_RMW(ah, AR_PHY_TIMING_CTRL4(0), SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF), AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF); - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, @@ -795,7 +768,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, pModal = &eep->modalHeader; txRxAttenLocal = 23; - REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); + REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon)); /* Single chain for 4K EEPROM*/ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); @@ -1014,16 +987,14 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn); } - if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, @@ -1061,7 +1032,12 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan; + return le16_to_cpu(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan); +} + +static u8 ath9k_hw_4k_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map4k.baseEepHeader.eepMisc; } const struct eeprom_ops eep_4k_ops = { @@ -1073,5 +1049,6 @@ const struct eeprom_ops eep_4k_ops = { .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, .set_board_values = ath9k_hw_4k_set_board_values, .set_txpower = ath9k_hw_4k_set_txpower, - .get_spur_channel = ath9k_hw_4k_get_spur_channel + .get_spur_channel = ath9k_hw_4k_get_spur_channel, + .get_eepmisc = ath9k_hw_4k_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 1a019a39eda1..3caa149b1013 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -22,12 +22,17 @@ static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { - return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF; + u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version); + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah) { - return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; + u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version); + + return version & AR5416_EEP_VER_MINOR_MASK; } static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) @@ -70,13 +75,13 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_ar9287_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 
ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_ar9287_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]); PR_EEP("Switch Settle", modal_hdr->switchSettling); @@ -123,6 +128,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -132,12 +138,12 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Major Version", ath9k_hw_ar9287_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_ar9287_get_eeprom_rev(ah)); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -150,10 +156,10 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); + PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("Power Table Offset", pBase->pwrTableOffset); PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); @@ -177,8 +183,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { - u32 el, integer; - u16 word; + u32 el; int i, err; bool need_swap; struct ar9287_eeprom *eep = &ah->eeprom.map9287; @@ -188,51 +193,31 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_AR9287); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = 
swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = swab16(eep->baseEepHeader.rfSilent); - eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; - - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; - - integer = swab32(eep->modalHeader.antCtrlCommon); - eep->modalHeader.antCtrlCommon = integer; - - for (i = 0; i < AR9287_MAX_CHAINS; i++) { - integer = swab32(eep->modalHeader.antCtrlChain[i]); - eep->modalHeader.antCtrlChain[i] = integer; - } - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(eep->modalHeader.spurChans[i].spurChan); - eep->modalHeader.spurChans[i].spurChan = word; - } + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon); + + for (i = 0; i < AR9287_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + eep->modalHeader.spurChans[i].spurChan); } if (!ath9k_hw_nvram_check_version(ah, AR9287_EEP_VER, @@ -250,9 +235,7 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = &eep->modalHeader; struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; - u16 ver_minor; - - ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK; + u16 ver_minor = ath9k_hw_ar9287_get_eeprom_rev(ah); switch (param) { case EEP_NFTHRESH_2: @@ -264,15 +247,13 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; - case EEP_MINOR_REV: - return ver_minor; + return le16_to_cpu(pBase->rfSilent); case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: @@ -387,8 +368,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah, xpdMask = pEepData->modalHeader.xpdGain; - if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= - AR9287_EEP_MINOR_VER_2) + if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2) pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; else pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), @@ -737,8 +717,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= - AR9287_EEP_MINOR_VER_2) + if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; ath9k_hw_set_ar9287_power_per_rate_table(ah, chan, @@ -879,13 +858,13 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah, pModal = &eep->modalHeader; 
- REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); + REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon)); for (i = 0; i < AR9287_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, - pModal->antCtrlChain[i]); + le32_to_cpu(pModal->antCtrlChain[i])); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) @@ -983,7 +962,14 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah, static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan; + __le16 spur_ch = ah->eeprom.map9287.modalHeader.spurChans[i].spurChan; + + return le16_to_cpu(spur_ch); +} + +static u8 ath9k_hw_ar9287_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.map9287.baseEepHeader.eepMisc; } const struct eeprom_ops eep_ar9287_ops = { @@ -995,5 +981,6 @@ const struct eeprom_ops eep_ar9287_ops = { .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev, .set_board_values = ath9k_hw_ar9287_set_board_values, .set_txpower = ath9k_hw_ar9287_set_txpower, - .get_spur_channel = ath9k_hw_ar9287_get_spur_channel + .get_spur_channel = ath9k_hw_ar9287_get_spur_channel, + .get_eepmisc = ath9k_hw_ar9287_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 959682f7909c..56b44fc7a8e6 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -79,12 +79,17 @@ static void ath9k_olc_get_pdadcs(struct ath_hw *ah, static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah) { - return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF); + u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version); + + return (version & AR5416_EEP_VER_MAJOR_MASK) >> + AR5416_EEP_VER_MAJOR_SHIFT; } static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) { - return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); + u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version); + + return version & AR5416_EEP_VER_MINOR_MASK; } #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) @@ -126,14 +131,14 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) return __ath9k_hw_def_fill_eeprom(ah); } -#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) +#ifdef CONFIG_ATH9K_COMMON_DEBUG static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); - PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]); - PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]); - PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); + PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2])); + PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]); PR_EEP("Chain2 Ant. 
Gain", modal_hdr->antennaGainCh[2]); @@ -189,9 +194,9 @@ static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1); PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1); PR_EEP("LNA Control", modal_hdr->lna_ctl); - PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]); - PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]); - PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]); + PR_EEP("XPA Bias Freq0", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[0])); + PR_EEP("XPA Bias Freq1", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[1])); + PR_EEP("XPA Bias Freq2", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[2])); return len; } @@ -201,6 +206,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct base_eep_header *pBase = &eep->baseEepHeader; + u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber); if (!dump_base_hdr) { len += scnprintf(buf + len, size - len, @@ -214,12 +220,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, goto out; } - PR_EEP("Major Version", pBase->version >> 12); - PR_EEP("Minor Version", pBase->version & 0xFFF); - PR_EEP("Checksum", pBase->checksum); - PR_EEP("Length", pBase->length); - PR_EEP("RegDomain1", pBase->regDmn[0]); - PR_EEP("RegDomain2", pBase->regDmn[1]); + PR_EEP("Major Version", ath9k_hw_def_get_eeprom_ver(ah)); + PR_EEP("Minor Version", ath9k_hw_def_get_eeprom_rev(ah)); + PR_EEP("Checksum", le16_to_cpu(pBase->checksum)); + PR_EEP("Length", le16_to_cpu(pBase->length)); + PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0])); + PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1])); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); @@ -232,10 +238,10 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); - PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); - PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); - PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF); - PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); + PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN)); + PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF); + PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF); + PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF); PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl); len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", @@ -268,61 +274,40 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) return err; if (need_swap) - el = swab16(eep->baseEepHeader.length); + el = swab16((__force u16)eep->baseEepHeader.length); else - el = eep->baseEepHeader.length; + el = le16_to_cpu(eep->baseEepHeader.length); el = min(el / sizeof(u16), SIZE_EEPROM_DEF); if (!ath9k_hw_nvram_validate_checksum(ah, el)) return -EINVAL; if (need_swap) { - u32 integer, j; - u16 word; - - word = swab16(eep->baseEepHeader.length); - eep->baseEepHeader.length = word; - - word = swab16(eep->baseEepHeader.checksum); - eep->baseEepHeader.checksum = word; - - word = swab16(eep->baseEepHeader.version); - eep->baseEepHeader.version = word; - - word = swab16(eep->baseEepHeader.regDmn[0]); - eep->baseEepHeader.regDmn[0] = word; - - word = swab16(eep->baseEepHeader.regDmn[1]); - eep->baseEepHeader.regDmn[1] = word; - - word = 
swab16(eep->baseEepHeader.rfSilent); - eep->baseEepHeader.rfSilent = word; - - word = swab16(eep->baseEepHeader.blueToothOptions); - eep->baseEepHeader.blueToothOptions = word; + u32 j; - word = swab16(eep->baseEepHeader.deviceCap); - eep->baseEepHeader.deviceCap = word; + EEPROM_FIELD_SWAB16(eep->baseEepHeader.length); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.version); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions); + EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap); for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) { struct modal_eep_header *pModal = &eep->modalHeader[j]; - integer = swab32(pModal->antCtrlCommon); - pModal->antCtrlCommon = integer; + EEPROM_FIELD_SWAB32(pModal->antCtrlCommon); - for (i = 0; i < AR5416_MAX_CHAINS; i++) { - integer = swab32(pModal->antCtrlChain[i]); - pModal->antCtrlChain[i] = integer; - } - for (i = 0; i < 3; i++) { - word = swab16(pModal->xpaBiasLvlFreq[i]); - pModal->xpaBiasLvlFreq[i] = word; - } + for (i = 0; i < AR5416_MAX_CHAINS; i++) + EEPROM_FIELD_SWAB32(pModal->antCtrlChain[i]); - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - word = swab16(pModal->spurChans[i].spurChan); - pModal->spurChans[i].spurChan = word; - } + for (i = 0; i < 3; i++) + EEPROM_FIELD_SWAB16(pModal->xpaBiasLvlFreq[i]); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) + EEPROM_FIELD_SWAB16( + pModal->spurChans[i].spurChan); } } @@ -332,7 +317,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) /* Enable fixup for AR_AN_TOP2 if necessary */ if ((ah->hw_version.devid == AR9280_DEVID_PCI) && - ((eep->baseEepHeader.version & 0xff) > 0x0a) && + ((le16_to_cpu(eep->baseEepHeader.version) & 0xff) > 0x0a) && (eep->baseEepHeader.pwdclkind == 0)) ah->need_an_top2_fixup = true; @@ -365,13 +350,13 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: - return pBase->regDmn[0]; + return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: - return pBase->deviceCap; + return le16_to_cpu(pBase->deviceCap); case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: - return pBase->rfSilent; + return le16_to_cpu(pBase->rfSilent); case EEP_OB_5: return pModal[0].ob; case EEP_DB_5: @@ -380,8 +365,6 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, return pModal[1].ob; case EEP_DB_2: return pModal[1].db; - case EEP_MINOR_REV: - return AR5416_VER_MASK; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: @@ -393,27 +376,27 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, case EEP_TXGAIN_TYPE: return pBase->txGainType; case EEP_OL_PWRCTRL: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) return pBase->openLoopPwrCntl ? 
true : false; else return false; case EEP_RC_CHAIN_MASK: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) return pBase->rcChainMask; else return 0; case EEP_DAC_HPWR_5G: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) return pBase->dacHiPwrMode_5G; else return 0; case EEP_FRAC_N_5G: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_22) return pBase->frac_n_5g; else return 0; case EEP_PWR_TABLE_OFFSET: - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21) + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_21) return pBase->pwr_table_offset; else return AR5416_PWR_TABLE_OFFSET_DB; @@ -436,7 +419,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah, u8 txRxAttenLocal, int regChainOffset, int i) { ENABLE_REG_RMW_BUFFER(ah); - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[i]; if (AR_SREV_9280_20_OR_LATER(ah)) { @@ -487,11 +470,13 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, struct ar5416_eeprom_def *eep = &ah->eeprom.def; int i, regChainOffset; u8 txRxAttenLocal; + u32 antCtrlCommon; pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; + antCtrlCommon = le32_to_cpu(pModal->antCtrlCommon); - REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff); + REG_WRITE(ah, AR_PHY_SWITCH_COM, antCtrlCommon & 0xffff); for (i = 0; i < AR5416_MAX_CHAINS; i++) { if (AR_SREV_9280(ah)) { @@ -505,7 +490,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, regChainOffset = i * 0x1000; REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, - pModal->antCtrlChain[i]); + le32_to_cpu(pModal->antCtrlChain[i])); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & @@ -605,7 +590,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, pModal->thresh62); } - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); @@ -613,7 +598,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, pModal->txFrameToPaOn); } - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, @@ -621,13 +606,14 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, } if (AR_SREV_9280_20_OR_LATER(ah) && - AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, pModal->miscBits); - if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { + if (AR_SREV_9280_20(ah) && + ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) { if (IS_CHAN_2GHZ(chan)) REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, eep->baseEepHeader.dacLpMode); @@ -651,7 +637,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, static void ath9k_hw_def_set_addac(struct ath_hw *ah, struct ath9k_channel *chan) { -#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt]) +#define XPA_LVL_FREQ(cnt) (le16_to_cpu(pModal->xpaBiasLvlFreq[cnt])) struct modal_eep_header 
*pModal; struct ar5416_eeprom_def *eep = &ah->eeprom.def; u8 biaslevel; @@ -798,8 +784,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = pEepData->modalHeader[modalIdx].pdGainOverlap; } else { @@ -1171,10 +1156,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, memset(ratesArray, 0, sizeof(ratesArray)); - if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= - AR5416_EEP_MINOR_VER_2) { + if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; - } ath9k_hw_set_def_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, @@ -1314,7 +1297,14 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah, static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { - return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan; + __le16 spch = ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan; + + return le16_to_cpu(spch); +} + +static u8 ath9k_hw_def_get_eepmisc(struct ath_hw *ah) +{ + return ah->eeprom.def.baseEepHeader.eepMisc; } const struct eeprom_ops eep_def_ops = { @@ -1327,5 +1317,6 @@ const struct eeprom_ops eep_def_ops = { .set_board_values = ath9k_hw_def_set_board_values, .set_addac = ath9k_hw_def_set_addac, .set_txpower = ath9k_hw_def_set_txpower, - .get_spur_channel = ath9k_hw_def_get_spur_channel + .get_spur_channel = ath9k_hw_def_get_spur_channel, + .get_eepmisc = ath9k_hw_def_get_eepmisc }; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index a35f78be8dec..ac36873d6da4 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -731,7 +731,7 @@ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) udelay(100); if (WARN_ON_ONCE(i >= 100)) { - ath_err(common, "PLL4 meaurement not done\n"); + ath_err(common, "PLL4 measurement not done\n"); break; } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 20794660d6ae..084ad1bd495f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -620,6 +620,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, /* Will be cleared in ath9k_start() */ set_bit(ATH_OP_INVALID, &common->op_flags); + sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX | + AIRTIME_USE_NEW_QUEUES); sc->sc_ah = ah; sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 59e3bd0f4c20..58f06ce9a4cf 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -70,10 +70,10 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, goto out; if (txq->mac80211_qnum >= 0) { - struct list_head *list; + struct ath_acq *acq; - list = &sc->cur_chan->acq[txq->mac80211_qnum]; - if (!list_empty(list)) + acq = &sc->cur_chan->acq[txq->mac80211_qnum]; + if (!list_empty(&acq->acq_new) || !list_empty(&acq->acq_old)) pending = true; } out: diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index fb4ba27d92b7..d79837fe333f 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1002,6 +1002,70 @@ 
static void ath9k_apply_ampdu_details(struct ath_softc *sc, } } +static void ath_rx_count_airtime(struct ath_softc *sc, + struct ath_rx_status *rs, + struct sk_buff *skb) +{ + struct ath_node *an; + struct ath_acq *acq; + struct ath_vif *avp; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_sta *sta; + struct ieee80211_rx_status *rxs; + const struct ieee80211_rate *rate; + bool is_sgi, is_40, is_sp; + int phy; + u16 len = rs->rs_datalen; + u32 airtime = 0; + u8 tidno, acno; + + if (!ieee80211_is_data(hdr->frame_control)) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL); + if (!sta) + goto exit; + an = (struct ath_node *) sta->drv_priv; + avp = (struct ath_vif *) an->vif->drv_priv; + tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + acno = TID_TO_WME_AC(tidno); + acq = &avp->chanctx->acq[acno]; + + rxs = IEEE80211_SKB_RXCB(skb); + + is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI); + is_40 = !!(rxs->flag & RX_FLAG_40MHZ); + is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE); + + if (!!(rxs->flag & RX_FLAG_HT)) { + /* MCS rates */ + + airtime += ath_pkt_duration(sc, rxs->rate_idx, len, + is_40, is_sgi, is_sp); + } else { + + phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM; + rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx]; + airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100, + len, rxs->rate_idx, is_sp); + } + + if (!!(sc->airtime_flags & AIRTIME_USE_RX)) { + spin_lock_bh(&acq->lock); + an->airtime_deficit[acno] -= airtime; + if (an->airtime_deficit[acno] <= 0) + __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno)); + spin_unlock_bh(&acq->lock); + } + ath_debug_airtime(sc, an, airtime, 0); +exit: + rcu_read_unlock(); +} + int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_rxbuf *bf; @@ -1148,6 +1212,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) ath9k_antenna_check(sc, &rs); ath9k_apply_ampdu_details(sc, &rs, rxs); ath_debug_rate_stats(sc, &rs, skb); + ath_rx_count_airtime(sc, &rs, skb); hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ack(hdr->frame_control)) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 4e2f3ac266c3..c35a192861ab 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -97,18 +97,6 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb(skb); } -void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) - __acquires(&txq->axq_lock) -{ - spin_lock_bh(&txq->axq_lock); -} - -void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) - __releases(&txq->axq_lock) -{ - spin_unlock_bh(&txq->axq_lock); -} - void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) __releases(&txq->axq_lock) { @@ -124,21 +112,44 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) ath_tx_status(hw, skb); } -static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) +void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct list_head *list; struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; struct ath_chanctx *ctx = avp->chanctx; + struct ath_acq *acq; + struct list_head *tid_list; + u8 acno = TID_TO_WME_AC(tid->tidno); - if (!ctx) + if (!ctx || !list_empty(&tid->list)) return; - list = 
&ctx->acq[TID_TO_WME_AC(tid->tidno)]; - if (list_empty(&tid->list)) - list_add_tail(&tid->list, list); + + acq = &ctx->acq[acno]; + if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) && + tid->an->airtime_deficit[acno] > 0) + tid_list = &acq->acq_new; + else + tid_list = &acq->acq_old; + + list_add_tail(&tid->list, tid_list); } +void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; + struct ath_chanctx *ctx = avp->chanctx; + struct ath_acq *acq; + + if (!ctx || !list_empty(&tid->list)) + return; + + acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; + spin_lock_bh(&acq->lock); + __ath_tx_queue_tid(sc, tid); + spin_unlock_bh(&acq->lock); +} + + void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) { struct ath_softc *sc = hw->priv; @@ -153,7 +164,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) ath_txq_lock(sc, txq); tid->has_queued = true; - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); ath_txq_unlock(sc, txq); @@ -660,7 +671,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, skb_queue_splice_tail(&bf_pending, &tid->retry_q); if (!an->sleeping) { - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; @@ -688,6 +699,53 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); } +static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf, struct ath_tx_status *ts) +{ + struct ath_node *an; + struct ath_acq *acq = &sc->cur_chan->acq[txq->mac80211_qnum]; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_tx_rate rates[4]; + struct ieee80211_sta *sta; + int i; + u32 airtime = 0; + + skb = bf->bf_mpdu; + if(!skb) + return; + + hdr = (struct ieee80211_hdr *)skb->data; + memcpy(rates, bf->rates, sizeof(rates)); + + rcu_read_lock(); + + sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); + if(!sta) + goto exit; + + + an = (struct ath_node *) sta->drv_priv; + + airtime += ts->duration * (ts->ts_longretry + 1); + + for(i=0; i < ts->ts_rateindex; i++) + airtime += ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i) * rates[i].count; + + if (!!(sc->airtime_flags & AIRTIME_USE_TX)) { + spin_lock_bh(&acq->lock); + an->airtime_deficit[txq->mac80211_qnum] -= airtime; + if (an->airtime_deficit[txq->mac80211_qnum] <= 0) + __ath_tx_queue_tid(sc, ath_get_skb_tid(sc, an, skb)); + spin_unlock_bh(&acq->lock); + } + ath_debug_airtime(sc, an, 0, airtime); + +exit: + rcu_read_unlock(); +} + static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) @@ -709,6 +767,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, ts->ts_rateindex); + ath_tx_count_airtime(sc, txq, bf, ts); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); @@ -1068,8 +1127,8 @@ finish: * width - 0 for 20 MHz, 1 for 40 MHz * half_gi - to use 4us v/s 3.6 us for symbol time */ -static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, - int width, int half_gi, bool shortPreamble) +u32 ath_pkt_duration(struct ath_softc *sc, 
u8 rix, int pktlen, + int width, int half_gi, bool shortPreamble) { u32 nbits, nsymbits, duration, nsymbols; int streams; @@ -1151,8 +1210,9 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, if (is_40) { u8 power_ht40delta; struct ar5416_eeprom_def *eep = &ah->eeprom.def; + u16 eeprom_rev = ah->eep_ops->get_eeprom_rev(ah); - if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) { + if (eeprom_rev >= AR5416_EEP_MINOR_VER_2) { bool is_2ghz; struct modal_eep_header *pmodal; @@ -1467,7 +1527,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, } static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid, bool *stop) + struct ath_atx_tid *tid) { struct ath_buf *bf; struct ieee80211_tx_info *tx_info; @@ -1489,7 +1549,6 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { __skb_queue_tail(&tid->retry_q, bf->bf_mpdu); - *stop = true; return false; } @@ -1613,7 +1672,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_lock(sc, txq); tid->clear_ps_filter = true; if (ath_tid_has_buffered(tid)) { - ath_tx_queue_tid(sc, txq, tid); + ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); } ath_txq_unlock_complete(sc, txq); @@ -1912,9 +1971,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_atx_tid *tid, *last_tid; + struct ath_atx_tid *tid; struct list_head *tid_list; - bool sent = false; + struct ath_acq *acq; + bool active = AIRTIME_ACTIVE(sc->airtime_flags); if (txq->mac80211_qnum < 0) return; @@ -1923,48 +1983,55 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) return; spin_lock_bh(&sc->chan_lock); - tid_list = &sc->cur_chan->acq[txq->mac80211_qnum]; - - if (list_empty(tid_list)) { - spin_unlock_bh(&sc->chan_lock); - return; - } - rcu_read_lock(); + acq = &sc->cur_chan->acq[txq->mac80211_qnum]; - last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list); - while (!list_empty(tid_list)) { - bool stop = false; - - if (sc->cur_chan->stopped) - break; - - tid = list_first_entry(tid_list, struct ath_atx_tid, list); - list_del_init(&tid->list); + if (sc->cur_chan->stopped) + goto out; - if (ath_tx_sched_aggr(sc, txq, tid, &stop)) - sent = true; +begin: + tid_list = &acq->acq_new; + if (list_empty(tid_list)) { + tid_list = &acq->acq_old; + if (list_empty(tid_list)) + goto out; + } + tid = list_first_entry(tid_list, struct ath_atx_tid, list); - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (ath_tid_has_buffered(tid)) - ath_tx_queue_tid(sc, txq, tid); + if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) { + spin_lock_bh(&acq->lock); + tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM; + list_move_tail(&tid->list, &acq->acq_old); + spin_unlock_bh(&acq->lock); + goto begin; + } - if (stop) - break; + if (!ath_tid_has_buffered(tid)) { + spin_lock_bh(&acq->lock); + if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old)) + list_move_tail(&tid->list, &acq->acq_old); + else { + list_del_init(&tid->list); + } + spin_unlock_bh(&acq->lock); + goto begin; + } - if (tid == last_tid) { - if (!sent) - break; - sent = false; - last_tid = list_entry(tid_list->prev, - struct ath_atx_tid, list); + /* + * If we succeed in scheduling 
something, immediately restart to make + * sure we keep the HW busy. + */ + if(ath_tx_sched_aggr(sc, txq, tid)) { + if (!active) { + spin_lock_bh(&acq->lock); + list_move_tail(&tid->list, &acq->acq_old); + spin_unlock_bh(&acq->lock); } + goto begin; } +out: rcu_read_unlock(); spin_unlock_bh(&sc->chan_lock); } @@ -2818,6 +2885,9 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) struct ath_atx_tid *tid; int tidno, acno; + for (acno = 0; acno < IEEE80211_NUM_ACS; acno++) + an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM; + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); tid->an = an; diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig index 591ebaea8265..4b83e87f0b94 100644 --- a/drivers/net/wireless/ath/wcn36xx/Kconfig +++ b/drivers/net/wireless/ath/wcn36xx/Kconfig @@ -1,6 +1,8 @@ config WCN36XX tristate "Qualcomm Atheros WCN3660/3680 support" depends on MAC80211 && HAS_DMA + depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n + depends on QCOM_SMD || QCOM_SMD=n ---help--- This module adds support for wireless adapters based on Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets. diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 231fd022f0f5..87dfdaf9044c 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/interrupt.h> +#include <linux/soc/qcom/smem_state.h> #include "wcn36xx.h" #include "txrx.h" @@ -151,9 +152,12 @@ int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn) goto out_err; /* Initialize SMSM state Clear TX Enable RING EMPTY STATE */ - ret = wcn->ctrl_ops->smsm_change_state( - WCN36XX_SMSM_WLAN_TX_ENABLE, - WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + ret = qcom_smem_state_update_bits(wcn->tx_enable_state, + WCN36XX_SMSM_WLAN_TX_ENABLE | + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY, + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + if (ret) + goto out_err; return 0; @@ -678,9 +682,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, * notify chip about new frame through SMSM bus. */ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) { - wcn->ctrl_ops->smsm_change_state( - 0, - WCN36XX_SMSM_WLAN_TX_ENABLE); + qcom_smem_state_update_bits(wcn->tx_rings_empty_state, + WCN36XX_SMSM_WLAN_TX_ENABLE, + WCN36XX_SMSM_WLAN_TX_ENABLE); } else { /* indicate End Of Packet and generate interrupt on descriptor * done. 
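Earlier in this merge, the ath9k eeprom_4k.c, eeprom_9287.c and eeprom_def.c hunks convert the raw u16/u32 EEPROM header fields to sparse-annotated little-endian types that are decoded with le16_to_cpu()/le32_to_cpu() at each point of use, and they collapse the open-coded swab sequences in the check_eeprom paths into EEPROM_FIELD_SWAB16/32 helpers. The helper definitions live in eeprom.h, which this excerpt does not include; a plausible minimal reconstruction, consistent with the call sites and the (__force u16) cast visible in the length handling, would be:

	/* Hypothetical reconstruction -- eeprom.h is not shown above.
	 * Byte-swap a __le16/__le32 field in place when the EEPROM image
	 * was detected to be in the opposite byte order (need_swap).
	 */
	#define EEPROM_FIELD_SWAB16(field) \
		((field) = (__force __le16)swab16((__force u16)(field)))

	#define EEPROM_FIELD_SWAB32(field) \
		((field) = (__force __le32)swab32((__force u32)(field)))

The __force casts keep sparse quiet while the raw bytes are reordered; after the swap every reader goes through le16_to_cpu()/le32_to_cpu(), so the same code is correct on little- and big-endian hosts.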
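The ath9k xmit.c hunks in this merge also replace the old TID round-robin with an airtime-fairness scheduler: ath_tx_count_airtime()/ath_rx_count_airtime() charge each station's per-AC airtime_deficit on frame completion, and ath_txq_schedule() services TIDs from an acq_new/acq_old list pair, topping a negative deficit up by ATH_AIRTIME_QUANTUM and demoting the TID to the old list. Below is a minimal standalone C sketch of that deficit round-robin; the quantum value, structures and helpers are illustrative assumptions, not the driver's, and the rotation is simplified (with fairness active, the driver leaves a scheduled TID in place until its credit is spent).

	#include <stdio.h>
	#include <stddef.h>

	#define ATH_AIRTIME_QUANTUM 300	/* usec per refill; placeholder value */

	struct tid {
		const char *name;
		int deficit;	/* airtime credit in usec */
		int buffered;	/* frames waiting */
		struct tid *next;
	};

	static struct tid *q_new, *q_old;	/* models acq_new / acq_old */

	static void enqueue(struct tid **head, struct tid *t)
	{
		t->next = NULL;
		while (*head)
			head = &(*head)->next;
		*head = t;
	}

	static struct tid *dequeue(struct tid **head)
	{
		struct tid *t = *head;

		if (t)
			*head = t->next;
		return t;
	}

	/* One pass of the scheduling loop in ath_txq_schedule(). */
	static struct tid *pick_next_tid(void)
	{
		for (;;) {
			struct tid *t = q_new ? dequeue(&q_new) : dequeue(&q_old);

			if (!t)
				return NULL;
			if (t->deficit <= 0) {
				/* out of credit: refill one quantum, demote */
				t->deficit += ATH_AIRTIME_QUANTUM;
				enqueue(&q_old, t);
				continue;
			}
			if (!t->buffered)
				continue;	/* nothing queued: drop from rotation */
			enqueue(&q_old, t);	/* keep in rotation behind the rest */
			return t;
		}
	}

	int main(void)
	{
		struct tid slow = { "slow", ATH_AIRTIME_QUANTUM, 10, NULL };
		struct tid fast = { "fast", ATH_AIRTIME_QUANTUM, 10, NULL };
		int round;

		enqueue(&q_new, &slow);
		enqueue(&q_new, &fast);

		for (round = 0; round < 8; round++) {
			struct tid *t = pick_next_tid();

			if (!t)
				break;
			/* slow rates burn more airtime per frame */
			t->deficit -= (t == &slow) ? 400 : 100;
			t->buffered--;
			printf("round %d: %s (deficit %d)\n",
			       round, t->name, t->deficit);
		}
		return 0;
	}

Under this model a station whose frames cost four times the airtime is scheduled roughly a quarter as often, which is exactly the monopolization of the medium by slow-rate stations that the two-list scheme prevents.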
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 4f87ef1e1eb8..b765c647319d 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -350,6 +350,8 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, + WCN36XX_HAL_PRINT_REG_INFO_IND = 259, + WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE }; @@ -4703,4 +4705,18 @@ struct stats_class_b_ind { u32 rx_time_total; }; +/* WCN36XX_HAL_PRINT_REG_INFO_IND */ +struct wcn36xx_hal_print_reg_info_ind { + struct wcn36xx_hal_msg_header header; + + u32 count; + u32 scenario; + u32 reason; + + struct { + u32 addr; + u32 value; + } regs[]; +} __packed; + #endif /* _HAL_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e1d59da2ad20..0002190c9041 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -21,6 +21,10 @@ #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/soc/qcom/smd.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/soc/qcom/wcnss_ctrl.h> #include "wcn36xx.h" unsigned int wcn36xx_dbg_mask; @@ -564,23 +568,59 @@ out: return ret; } -static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *mac_addr) +static void wcn36xx_hw_scan_worker(struct work_struct *work) { - struct wcn36xx *wcn = hw->priv; + struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work); + struct cfg80211_scan_request *req = wcn->scan_req; + u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX]; + struct cfg80211_scan_info scan_info = {}; + int i; + + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels); + + for (i = 0; i < req->n_channels; i++) + channels[i] = req->channels[i]->hw_value; + + wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels); wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN); - wcn36xx_smd_start_scan(wcn); + for (i = 0; i < req->n_channels; i++) { + wcn->scan_freq = req->channels[i]->center_freq; + wcn->scan_band = req->channels[i]->band; + + wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value); + msleep(30); + wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value); + + wcn->scan_freq = 0; + } + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + + scan_info.aborted = false; + ieee80211_scan_completed(wcn->hw, &scan_info); + + mutex_lock(&wcn->scan_lock); + wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); } -static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int wcn36xx_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) { struct wcn36xx *wcn = hw->priv; - wcn36xx_smd_end_scan(wcn); - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + mutex_lock(&wcn->scan_lock); + if (wcn->scan_req) { + mutex_unlock(&wcn->scan_lock); + return -EBUSY; + } + wcn->scan_req = &hw_req->req; + mutex_unlock(&wcn->scan_lock); + + schedule_work(&wcn->scan_work); + + return 0; } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, @@ -993,8 +1033,7 @@ static const struct ieee80211_ops wcn36xx_ops = { .configure_filter = wcn36xx_configure_filter, .tx = wcn36xx_tx, .set_key = wcn36xx_set_key, - .sw_scan_start = wcn36xx_sw_scan_start, - .sw_scan_complete = wcn36xx_sw_scan_complete, + .hw_scan = wcn36xx_hw_scan, .bss_info_changed = wcn36xx_bss_info_changed, 
.set_rts_threshold = wcn36xx_set_rts_threshold, .sta_add = wcn36xx_sta_add, @@ -1019,6 +1058,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) ieee80211_hw_set(wcn->hw, SUPPORTS_PS); ieee80211_hw_set(wcn->hw, SIGNAL_DBM); ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL); + ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS); wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -1028,6 +1068,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; + wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS; + wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN; + wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -1058,8 +1101,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, int ret; /* Set TX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlantx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx"); if (!res) { wcn36xx_err("failed to get tx_irq\n"); return -ENOENT; @@ -1067,14 +1109,29 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, wcn->tx_irq = res->start; /* Set RX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlanrx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx"); if (!res) { wcn36xx_err("failed to get rx_irq\n"); return -ENOENT; } wcn->rx_irq = res->start; + /* Acquire SMSM tx enable handle */ + wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev, + "tx-enable", &wcn->tx_enable_state_bit); + if (IS_ERR(wcn->tx_enable_state)) { + wcn36xx_err("failed to get tx-enable state\n"); + return PTR_ERR(wcn->tx_enable_state); + } + + /* Acquire SMSM tx rings empty handle */ + wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev, + "tx-rings-empty", &wcn->tx_rings_empty_state_bit); + if (IS_ERR(wcn->tx_rings_empty_state)) { + wcn36xx_err("failed to get tx-rings-empty state\n"); + return PTR_ERR(wcn->tx_rings_empty_state); + } + mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0); if (!mmio_node) { wcn36xx_err("failed to acquire qcom,mmio reference\n"); @@ -1115,11 +1172,14 @@ static int wcn36xx_probe(struct platform_device *pdev) { struct ieee80211_hw *hw; struct wcn36xx *wcn; + void *wcnss; int ret; - u8 addr[ETH_ALEN]; + const u8 *addr; wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n"); + wcnss = dev_get_drvdata(pdev->dev.parent); + hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops); if (!hw) { wcn36xx_err("failed to alloc hw\n"); @@ -1130,11 +1190,26 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; - wcn->ctrl_ops = pdev->dev.platform_data; - mutex_init(&wcn->hal_mutex); + mutex_init(&wcn->scan_lock); + + INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); + + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process); + if (IS_ERR(wcn->smd_channel)) { + wcn36xx_err("failed to open WLAN_CTRL channel\n"); + ret = PTR_ERR(wcn->smd_channel); + goto out_wq; + } - if (!wcn->ctrl_ops->get_hw_mac(addr)) { + qcom_smd_set_drvdata(wcn->smd_channel, hw); + + addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret); + if (addr && ret != ETH_ALEN) { + wcn36xx_err("invalid local-mac-address\n"); + ret = -EINVAL; + goto out_wq; + } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); 
SET_IEEE80211_PERM_ADDR(wcn->hw, addr); } @@ -1158,6 +1233,7 @@ out_wq: out_err: return ret; } + static int wcn36xx_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); @@ -1165,45 +1241,37 @@ static int wcn36xx_remove(struct platform_device *pdev) wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n"); release_firmware(wcn->nv); - mutex_destroy(&wcn->hal_mutex); ieee80211_unregister_hw(hw); + + qcom_smem_state_put(wcn->tx_enable_state); + qcom_smem_state_put(wcn->tx_rings_empty_state); + iounmap(wcn->dxe_base); iounmap(wcn->ccu_base); + + mutex_destroy(&wcn->hal_mutex); ieee80211_free_hw(hw); return 0; } -static const struct platform_device_id wcn36xx_platform_id_table[] = { - { - .name = "wcn36xx", - .driver_data = 0 - }, + +static const struct of_device_id wcn36xx_of_match[] = { + { .compatible = "qcom,wcnss-wlan" }, {} }; -MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table); +MODULE_DEVICE_TABLE(of, wcn36xx_of_match); static struct platform_driver wcn36xx_driver = { .probe = wcn36xx_probe, .remove = wcn36xx_remove, .driver = { .name = "wcn36xx", + .of_match_table = wcn36xx_of_match, }, - .id_table = wcn36xx_platform_id_table, }; -static int __init wcn36xx_init(void) -{ - platform_driver_register(&wcn36xx_driver); - return 0; -} -module_init(wcn36xx_init); - -static void __exit wcn36xx_exit(void) -{ - platform_driver_unregister(&wcn36xx_driver); -} -module_exit(wcn36xx_exit); +module_platform_driver(wcn36xx_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index a443992320f2..1c2966f7db7a 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -19,6 +19,7 @@ #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/bitops.h> +#include <linux/soc/qcom/smd.h> #include "smd.h" struct wcn36xx_cfg_val { @@ -253,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) init_completion(&wcn->hal_rsp_compl); start = jiffies; - ret = wcn->ctrl_ops->tx(wcn->hal_buf, len); + ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { wcn36xx_err("HAL TX failed\n"); goto out; @@ -521,7 +522,7 @@ out: return ret; } -int wcn36xx_smd_start_scan(struct wcn36xx *wcn) +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_start_scan_req_msg msg_body; int ret = 0; @@ -529,7 +530,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -551,7 +552,7 @@ out: return ret; } -int wcn36xx_smd_end_scan(struct wcn36xx *wcn) +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_end_scan_req_msg msg_body; int ret = 0; @@ -559,7 +560,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2108,6 +2109,30 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn, return -ENOENT; } +static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn, + void *buf, + size_t len) +{ + struct wcn36xx_hal_print_reg_info_ind *rsp = buf; + int i; + + if (len < sizeof(*rsp)) { 
+ wcn36xx_warn("Corrupted print reg info indication\n"); + return -EIO; + } + + wcn36xx_dbg(WCN36XX_DBG_HAL, + "reginfo indication, scenario: 0x%x reason: 0x%x\n", + rsp->scenario, rsp->reason); + + for (i = 0; i < rsp->count; i++) { + wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n", + rsp->regs[i].addr, rsp->regs[i].value); + } + + return 0; +} + int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value) { struct wcn36xx_hal_update_cfg_req_msg msg_body, *body; @@ -2180,9 +2205,12 @@ out: return ret; } -static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) +int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, + const void *buf, size_t len) { - struct wcn36xx_hal_msg_header *msg_header = buf; + const struct wcn36xx_hal_msg_header *msg_header = buf; + struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel); + struct wcn36xx *wcn = hw->priv; struct wcn36xx_hal_ind_msg *msg_ind; wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len); @@ -2233,15 +2261,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: - msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL); + case WCN36XX_HAL_PRINT_REG_INFO_IND: + msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC); if (!msg_ind) { - /* - * FIXME: Do something smarter then just - * printing an error. - */ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", msg_header->msg_type); - break; + return -ENOMEM; } msg_ind->msg_len = len; @@ -2257,6 +2282,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); } + + return 0; } static void wcn36xx_ind_smd_work(struct work_struct *work) { @@ -2294,6 +2321,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) hal_ind_msg->msg, hal_ind_msg->msg_len); break; + case WCN36XX_HAL_PRINT_REG_INFO_IND: + wcn36xx_smd_print_reg_info_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); @@ -2315,22 +2347,13 @@ int wcn36xx_smd_open(struct wcn36xx *wcn) INIT_LIST_HEAD(&wcn->hal_ind_queue); spin_lock_init(&wcn->hal_ind_lock); - ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process); - if (ret) { - wcn36xx_err("failed to open control channel\n"); - goto free_wq; - } - - return ret; + return 0; -free_wq: - destroy_workqueue(wcn->hal_ind_wq); out: return ret; } void wcn36xx_smd_close(struct wcn36xx *wcn) { - wcn->ctrl_ops->close(); destroy_workqueue(wcn->hal_ind_wq); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index df80cbbd9d1b..8892ccd67b14 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -51,6 +51,7 @@ struct wcn36xx_hal_ind_msg { }; struct wcn36xx; +struct qcom_smd_channel; int wcn36xx_smd_open(struct wcn36xx *wcn); void wcn36xx_smd_close(struct wcn36xx *wcn); @@ -59,8 +60,8 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn); int wcn36xx_smd_start(struct wcn36xx *wcn); int wcn36xx_smd_stop(struct wcn36xx *wcn); int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); -int wcn36xx_smd_start_scan(struct wcn36xx *wcn); -int wcn36xx_smd_end_scan(struct wcn36xx *wcn); +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel); +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel); int 
wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); @@ -127,6 +128,10 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); + +int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, + const void *buf, size_t len); + int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp); diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 1f34c2e912d7..8c387a0a3c09 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -45,9 +45,20 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len); skb_pull(skb, bd->pdu.mpdu_header_off); + hdr = (struct ieee80211_hdr *) skb->data; + fc = __le16_to_cpu(hdr->frame_control); + sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); + + /* When scanning associate beacons to this */ + if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) { + status.freq = wcn->scan_freq; + status.band = wcn->scan_band; + } else { + status.freq = WCN36XX_CENTER_FREQ(wcn); + status.band = WCN36XX_BAND(wcn); + } + status.mactime = 10; - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.signal = -get_rssi0(bd); status.antenna = 1; status.rate_idx = 1; @@ -61,10 +72,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); - hdr = (struct ieee80211_hdr *) skb->data; - fc = __le16_to_cpu(hdr->frame_control); - sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - if (ieee80211_is_beacon(hdr->frame_control)) { wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n", skb, skb->len, fc, sn); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 22242d18e1fe..35a6590c3ee5 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -35,6 +35,9 @@ /* How many frames until we start a-mpdu TX session */ #define WCN36XX_AMPDU_START_THRESH 20 +#define WCN36XX_MAX_SCAN_SSIDS 9 +#define WCN36XX_MAX_SCAN_IE_LEN 500 + extern unsigned int wcn36xx_dbg_mask; enum wcn36xx_debug_mask { @@ -103,19 +106,6 @@ struct nv_data { u8 table; }; -/* Interface for platform control path - * - * @open: hook must be called when wcn36xx wants to open control channel. - * @tx: sends a buffer. 
- */
-struct wcn36xx_platform_ctrl_ops {
-	int (*open)(void *drv_priv, void *rsp_cb);
-	void (*close)(void);
-	int (*tx)(char *buf, size_t len);
-	int (*get_hw_mac)(u8 *addr);
-	int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
-};
-
 /**
  * struct wcn36xx_vif - holds VIF related fields
  *
@@ -205,7 +195,13 @@ struct wcn36xx {
 	void __iomem	*ccu_base;
 	void __iomem	*dxe_base;
 
-	struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+	struct qcom_smd_channel *smd_channel;
+
+	struct qcom_smem_state *tx_enable_state;
+	unsigned tx_enable_state_bit;
+	struct qcom_smem_state *tx_rings_empty_state;
+	unsigned tx_rings_empty_state_bit;
+
 	/*
 	 * smd_buf must be protected with smd_mutex to guarantee
 	 * that all messages are sent one after another
@@ -219,6 +215,12 @@ struct wcn36xx {
 	spinlock_t hal_ind_lock;
 	struct list_head hal_ind_queue;
 
+	struct work_struct scan_work;
+	struct cfg80211_scan_request *scan_req;
+	int scan_freq;
+	int scan_band;
+	struct mutex scan_lock;
+
 	/* DXE channels */
 	struct wcn36xx_dxe_ch dxe_tx_l_ch;	/* TX low */
 	struct wcn36xx_dxe_ch dxe_tx_h_ch;	/* TX high */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6aa3ff4240a9..e25e78e71f54 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -20,6 +20,10 @@
 
 #define WIL_MAX_ROC_DURATION_MS 5000
 
+bool disable_ap_sme;
+module_param(disable_ap_sme, bool, 0444);
+MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+
 #define CHAN60G(_channel, _flags) {				\
	.band			= NL80211_BAND_60GHZ,		\
	.center_freq		= 56160 + (2160 * (_channel)),	\
@@ -62,9 +66,16 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 	},
 	[NL80211_IFTYPE_AP] = {
 		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+		BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
+		BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4),
 		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4)
 	},
 	[NL80211_IFTYPE_P2P_CLIENT] = {
 		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
@@ -194,7 +205,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
 
 	int cid = wil_find_cid(wil, mac);
 
-	wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+	wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid);
 	if (cid < 0)
 		return cid;
 
@@ -233,7 +244,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
 		return -ENOENT;
 
 	ether_addr_copy(mac, wil->sta[cid].addr);
-	wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+	wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid);
 
 	rc = wil_cid_fill_sinfo(wil, cid, sinfo);
 
@@ -250,16 +261,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
 	struct net_device *ndev = wil_to_ndev(wil);
 	struct wireless_dev *p2p_wdev;
 
-	wil_dbg_misc(wil, "%s()\n", __func__);
+	wil_dbg_misc(wil, "add_iface\n");
 
 	if (type != NL80211_IFTYPE_P2P_DEVICE) {
-		wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+		wil_err(wil, "unsupported iftype %d\n", type);
 		return ERR_PTR(-EINVAL);
 	}
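[Editor's aside on the wcn36xx hunks earlier in this patch: replacing the explicit wcn36xx_init()/wcn36xx_exit() pair and the platform_device_id table with module_platform_driver() plus an of_device_id match table is the standard pattern for DT-probed platform drivers. A minimal, self-contained sketch of that pattern follows; the "foo" driver name and "vendor,foo" compatible string are made up for illustration and are not part of this patch:

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		dev_info(&pdev->dev, "probed from device tree\n");
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	/* Matched against the "compatible" property of device tree nodes */
	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo" },
		{}
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove = foo_remove,
		.driver = {
			.name = "foo",
			.of_match_table = foo_of_match,
		},
	};

	/* Expands to the module_init()/module_exit() boilerplate that the
	 * deleted wcn36xx_init()/wcn36xx_exit() used to spell out by hand. */
	module_platform_driver(foo_driver);

	MODULE_LICENSE("GPL");
]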
if (wil->p2p_wdev) { - wil_err(wil, "%s: P2P_DEVICE interface already created\n", - __func__); + wil_err(wil, "P2P_DEVICE interface already created\n"); return ERR_PTR(-EINVAL); } @@ -282,11 +292,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "del_iface\n"); if (wdev != wil->p2p_wdev) { - wil_err(wil, "%s: delete of incorrect interface 0x%p\n", - __func__, wdev); + wil_err(wil, "delete of incorrect interface 0x%p\n", wdev); return -EINVAL; } @@ -304,7 +313,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct wireless_dev *wdev = wil_to_wdev(wil); int rc; - wil_dbg_misc(wil, "%s() type=%d\n", __func__, type); + wil_dbg_misc(wil, "change_iface: type=%d\n", type); if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) { wil_dbg_misc(wil, "interface is up. resetting...\n"); @@ -351,8 +360,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, uint i, n; int rc; - wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n", - __func__, wdev, wdev->iftype); + wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype); /* check we are client side */ switch (wdev->iftype) { @@ -557,7 +565,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, int rc = 0; enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "connect\n"); wil_print_connect_params(wil, sme); if (test_bit(wil_status_fwconnecting, wil->status) || @@ -593,6 +601,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, goto out; } wil->privacy = sme->privacy; + wil->pbss = sme->pbss; if (wil->privacy) { /* For secure assoc, remove old keys */ @@ -689,12 +698,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); + wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code); if (!(test_bit(wil_status_fwconnecting, wil->status) || test_bit(wil_status_fwconnected, wil->status))) { - wil_err(wil, "%s: Disconnect was called while disconnected\n", - __func__); + wil_err(wil, "Disconnect was called while disconnected\n"); return 0; } @@ -702,7 +710,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, WMI_DISCONNECT_EVENTID, NULL, 0, WIL6210_DISCONNECT_TO_MS); if (rc) - wil_err(wil, "%s: disconnect error %d\n", __func__, rc); + wil_err(wil, "disconnect error %d\n", rc); return rc; } @@ -750,7 +758,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. 
*/ - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "mgmt_tx\n"); print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len); cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); @@ -811,7 +819,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, break; } } - wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); + wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]); return rc; } @@ -916,13 +924,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, return -EINVAL; } - wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__, + wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n", mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); if (IS_ERR(cs)) { - wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n", - __func__, mac_addr, key_usage_str[key_usage], key_index, + wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n", + mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); return -EINVAL; } @@ -931,8 +939,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { wil_err(wil, - "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n", - params->seq_len, __func__, mac_addr, + "Wrong PN len %d, %pM %s[%d] PN %*phN\n", + params->seq_len, mac_addr, key_usage_str[key_usage], key_index, params->seq_len, params->seq); return -EINVAL; @@ -956,11 +964,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage, mac_addr); - wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr, + wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr, key_usage_str[key_usage], key_index); if (IS_ERR(cs)) - wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__, + wil_info(wil, "Not connected, %pM %s[%d]\n", mac_addr, key_usage_str[key_usage], key_index); if (!IS_ERR_OR_NULL(cs)) @@ -977,7 +985,7 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "set_default_key: entered\n"); return 0; } @@ -990,8 +998,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; - wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n", - __func__, chan->center_freq, duration, wdev->iftype); + wil_dbg_misc(wil, + "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n", + chan->center_freq, duration, wdev->iftype); rc = wil_p2p_listen(wil, wdev, duration, chan, cookie); return rc; @@ -1003,7 +1012,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "cancel_remain_on_channel\n"); return wil_p2p_cancel_listen(wil, cookie); } @@ -1159,9 +1168,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, if (pbss) wmi_nettype = WMI_NETTYPE_P2P; - wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go); + wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go); if (is_go && !pbss) { - wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__); + wil_err(wil, "P2P GO must be in PBSS\n"); return -ENOTSUPP; } @@ -1216,7 +1225,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, int rc; u32 privacy = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "change_beacon\n"); wil_print_bcon_data(bcon); if (bcon->tail && @@ -1255,7 +1264,7 @@ static int wil_cfg80211_start_ap(struct wiphy 
*wiphy, struct cfg80211_crypto_settings *crypto = &info->crypto; u8 hidden_ssid; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "start_ap\n"); if (!channel) { wil_err(wil, "AP: No channel???\n"); @@ -1306,7 +1315,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop_ap\n"); netif_carrier_off(ndev); wil_set_recovery_state(wil, fw_recovery_idle); @@ -1322,13 +1331,35 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_add_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid); + + if (!disable_ap_sme) { + wil_err(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (params->aid > WIL_MAX_DMG_AID) { + wil_err(wil, "invalid aid\n"); + return -EINVAL; + } + + return wmi_new_sta(wil, mac, params->aid); +} + static int wil_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, + wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac, params->reason_code); mutex_lock(&wil->mutex); @@ -1338,6 +1369,52 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, return 0; } +static int wil_cfg80211_change_station(struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int authorize; + int cid, i; + struct vring_tx_data *txdata = NULL; + + wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac, + params->sta_flags_mask, params->sta_flags_set); + + if (!disable_ap_sme) { + wil_dbg_misc(wil, "not supported with AP SME enabled\n"); + return -EOPNOTSUPP; + } + + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + return 0; + + cid = wil_find_cid(wil, mac); + if (cid < 0) { + wil_err(wil, "station not found\n"); + return -ENOLINK; + } + + for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) + if (wil->vring2cid_tid[i][0] == cid) { + txdata = &wil->vring_tx_data[i]; + break; + } + + if (!txdata) { + wil_err(wil, "vring data not found\n"); + return -ENOLINK; + } + + authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED); + txdata->dot1x_open = authorize ? 
1 : 0; + wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i, + txdata->dot1x_open); + + return 0; +} + /* probe_client handling */ static void wil_probe_client_handle(struct wil6210_priv *wil, struct wil_probe_client_req *req) @@ -1387,7 +1464,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil) { struct wil_probe_client_req *req, *t; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "probe_client_flush\n"); mutex_lock(&wil->probe_client_mutex); @@ -1407,7 +1484,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy, struct wil_probe_client_req *req; int cid = wil_find_cid(wil, peer); - wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid); + wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid); if (cid < 0) return -ENOLINK; @@ -1435,7 +1512,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); if (params->ap_isolate >= 0) { - wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__, + wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n", wil->ap_isolate, params->ap_isolate); wil->ap_isolate = params->ap_isolate; } @@ -1448,7 +1525,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "start_p2p_device: entered\n"); wil->p2p.p2p_dev_started = 1; return 0; } @@ -1462,7 +1539,7 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, if (!p2p->p2p_dev_started) return; - wil_dbg_misc(wil, "%s: entered\n", __func__); + wil_dbg_misc(wil, "stop_p2p_device: entered\n"); mutex_lock(&wil->mutex); mutex_lock(&wil->p2p_wdev_mutex); wil_p2p_stop_radio_operations(wil); @@ -1499,7 +1576,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, return rc; } -static struct cfg80211_ops wil_cfg80211_ops = { +static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, .scan = wil_cfg80211_scan, @@ -1521,7 +1598,9 @@ static struct cfg80211_ops wil_cfg80211_ops = { .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, + .add_station = wil_cfg80211_add_station, .del_station = wil_cfg80211_del_station, + .change_station = wil_cfg80211_change_station, .probe_client = wil_cfg80211_probe_client, .change_bss = wil_cfg80211_change_bss, /* P2P device */ @@ -1542,10 +1621,11 @@ static void wil_wiphy_init(struct wiphy *wiphy) BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_MONITOR); - wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (!disable_ap_sme) + wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", __func__, wiphy->flags); wiphy->probe_resp_offload = diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 5e4058a4037b..3e8cdf12feda 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, } static const struct dbg_off isr_off[] = { - {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32}, - {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32}, - {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32}, - {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32}, - {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32}, - {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32}, - {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32}, + {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32}, + {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32}, + {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32}, + {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32}, + {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32}, + {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32}, + {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32}, {}, }; @@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, } static const struct dbg_off pseudo_isr_off[] = { - {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, - {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, - {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, + {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, + {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, + {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, {}, }; @@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, } static const struct dbg_off lgc_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, {}, }; static const struct dbg_off tx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), + {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL), doff_io32}, {}, }; static const struct dbg_off rx_itr_cnt_off[] = { - {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), + {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH), doff_io32}, - {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), + {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA), doff_io32}, - {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), + {"CTL", 0644, 
HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL), doff_io32}, - {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), + {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH), doff_io32}, - {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), + {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA), doff_io32}, - {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), + {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL), doff_io32}, {}, }; @@ -813,7 +813,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, rc = wil_cfg80211_mgmt_tx(wiphy, wdev, ¶ms, NULL); kfree(frame); - wil_info(wil, "%s() -> %d\n", __func__, rc); + wil_info(wil, "-> %d\n", rc); return len; } @@ -855,7 +855,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, rc1 = wmi_send(wil, cmdid, cmd, cmdlen); kfree(wmi); - wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1); + wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1); return rc; } @@ -1379,6 +1379,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + u8 aid = 0; switch (p->status) { case wil_sta_unused: @@ -1389,9 +1390,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) break; case wil_sta_connected: status = "connected"; + aid = p->aid; break; } - seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); + seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); @@ -1622,7 +1624,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, blob->data = (void * __force)wil->csr + HOSTADDR(map->host); blob->size = map->to - map->from; snprintf(name, sizeof(name), "blob_%s", map->name); - wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob); + wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob); } } @@ -1632,29 +1634,29 @@ static const struct { umode_t mode; const struct file_operations *fops; } dbg_files[] = { - {"mbox", S_IRUGO, &fops_mbox}, - {"vrings", S_IRUGO, &fops_vring}, - {"stations", S_IRUGO, &fops_sta}, - {"desc", S_IRUGO, &fops_txdesc}, - {"bf", S_IRUGO, &fops_bf}, - {"ssid", S_IRUGO | S_IWUSR, &fops_ssid}, - {"mem_val", S_IRUGO, &fops_memread}, - {"reset", S_IWUSR, &fops_reset}, - {"rxon", S_IWUSR, &fops_rxon}, - {"tx_mgmt", S_IWUSR, &fops_txmgmt}, - {"wmi_send", S_IWUSR, &fops_wmi}, - {"back", S_IRUGO | S_IWUSR, &fops_back}, - {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg}, - {"pmcdata", S_IRUGO, &fops_pmcdata}, - {"temp", S_IRUGO, &fops_temp}, - {"freq", S_IRUGO, &fops_freq}, - {"link", S_IRUGO, &fops_link}, - {"info", S_IRUGO, &fops_info}, - {"recovery", S_IRUGO | S_IWUSR, &fops_recovery}, - {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg}, - {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time}, - {"fw_capabilities", S_IRUGO, &fops_fw_capabilities}, - {"fw_version", S_IRUGO, &fops_fw_version}, + {"mbox", 0444, &fops_mbox}, + {"vrings", 0444, &fops_vring}, + {"stations", 0444, &fops_sta}, + {"desc", 0444, &fops_txdesc}, + {"bf", 0444, &fops_bf}, + {"ssid", 0644, &fops_ssid}, + {"mem_val", 0644, &fops_memread}, + {"reset", 0244, &fops_reset}, + {"rxon", 0244, &fops_rxon}, + {"tx_mgmt", 0244, &fops_txmgmt}, + {"wmi_send", 0244, &fops_wmi}, + {"back", 0644, &fops_back}, + {"pmccfg", 0644, &fops_pmccfg}, + {"pmcdata", 0444, &fops_pmcdata}, + {"temp", 0444, &fops_temp}, + {"freq", 0444, &fops_freq}, + {"link", 0444, &fops_link}, + 
{"info", 0444, &fops_info}, + {"recovery", 0644, &fops_recovery}, + {"led_cfg", 0644, &fops_led_cfg}, + {"led_blink_time", 0644, &fops_led_blink_time}, + {"fw_capabilities", 0444, &fops_fw_capabilities}, + {"fw_version", 0444, &fops_fw_version}, }; static void wil6210_debugfs_init_files(struct wil6210_priv *wil, @@ -1693,30 +1695,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, /* fields in struct wil6210_priv */ static const struct dbg_off dbg_wil_off[] = { - WIL_FIELD(privacy, S_IRUGO, doff_u32), - WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong), - WIL_FIELD(hw_version, S_IRUGO, doff_x32), - WIL_FIELD(recovery_count, S_IRUGO, doff_u32), - WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), - WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8), + WIL_FIELD(privacy, 0444, doff_u32), + WIL_FIELD(status[0], 0644, doff_ulong), + WIL_FIELD(hw_version, 0444, doff_x32), + WIL_FIELD(recovery_count, 0444, doff_u32), + WIL_FIELD(ap_isolate, 0444, doff_u32), + WIL_FIELD(discovery_mode, 0644, doff_u8), + WIL_FIELD(chip_revision, 0444, doff_u8), + WIL_FIELD(abft_len, 0644, doff_u8), {}, }; static const struct dbg_off dbg_wil_regs[] = { - {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), + {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), doff_io32}, - {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, + {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, {}, }; /* static parameters */ static const struct dbg_off dbg_statics[] = { - {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, - {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, - {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh, + {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, + {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, + {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, + {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh, doff_u32}, - {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8}, + {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 7053b62ca8d3..adcfef4dabf7 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev) mutex_lock(&wil->mutex); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_begin\n"); return 0; } @@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_complete\n"); mutex_unlock(&wil->mutex); } @@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, u32 tx_itr_en, tx_itr_val = 0; u32 rx_itr_en, rx_itr_val = 0; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "ethtoolops_get_coalesce\n"); tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) @@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__, + wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 82aae2d705b4..540fc20984d8 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,8 +19,9 @@ #include "wil6210.h" #include "fw.h" -MODULE_FIRMWARE(WIL_FW_NAME); -MODULE_FIRMWARE(WIL_FW2_NAME); +MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT); +MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS); +MODULE_FIRMWARE(WIL_BOARD_FILE_NAME); static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 8f40eb301924..f4901587c005 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -537,3 +537,22 @@ out: release_firmware(fw); return rc; } + +/** + * wil_fw_verify_file_exists - checks if firmware file exist + * + * @wil: driver context + * @name: firmware file name + * + * return value - boolean, true for success, false for failure + */ +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, name, wil_to_dev(wil)); + if (!rc) + release_firmware(fw); + return rc != -ENOENT; +} diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 64046e0bd0a2..cab1e5c0e374 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil) static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) { - wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__, + wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n", mask_halp ? "true" : "false"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), @@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) void wil6210_mask_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), BIT_DMA_EP_MISC_ICR_HALP); @@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil) static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq_pseudo\n"); wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE); @@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil) static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) { - wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__, + wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n", unmask_halp ? "true" : "false"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), @@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) static void wil6210_unmask_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), BIT_DMA_EP_MISC_ICR_HALP); @@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil) static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq_pseudo\n"); set_bit(wil_status_irqen, wil->status); @@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) void wil_mask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "mask_irq\n"); wil6210_mask_irq_tx(wil); wil6210_mask_irq_rx(wil); @@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil) void wil_unmask_irq(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "unmask_irq\n"); wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC), WIL_ICR_ICC_VALUE); @@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil) void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "configure_interrupt_moderation\n"); /* disable interrupt moderation for monitor * to get better timestamp precision @@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) } if (isr & BIT_DMA_EP_MISC_ICR_HALP) { - wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__); + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); wil6210_mask_halp(wil); isr &= ~BIT_DMA_EP_MISC_ICR_HALP; complete(&wil->halp.comp); @@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil) void wil6210_set_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "set_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS), BIT_DMA_EP_MISC_ICR_HALP); @@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil) 
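[Editor's aside on the long run of wil_dbg_*/wil_err conversions in this file and the files below: dropping "%s: ..." with __func__ from the format strings does not lose information. Assuming the wil_dbg_* macros route to netdev_dbg()/pr_debug(), as the upstream wil6210 print helpers do, they are covered by dynamic debug, so the function name can be re-enabled at runtime with the +f decorator flag instead of being baked into every string, e.g. (path assumes debugfs mounted at /sys/kernel/debug and CONFIG_DYNAMIC_DEBUG enabled):

	echo 'module wil6210 +pf' > /sys/kernel/debug/dynamic_debug/control
]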
void wil6210_clear_halp(struct wil6210_priv *wil) { - wil_dbg_irq(wil, "%s()\n", __func__); + wil_dbg_irq(wil, "clear_halp\n"); wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR), BIT_DMA_EP_MISC_ICR_HALP); @@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) { int rc; - wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx"); + wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); rc = request_threaded_irq(irq, wil6210_hardirq, wil6210_thread_irq, @@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) void wil6210_fini_irq(struct wil6210_priv *wil, int irq) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "fini_irq:\n"); wil_mask_irq(wil); free_irq(irq, wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index e2e021bcaa03..efb1f59aafd9 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -27,23 +27,23 @@ #define WAIT_FOR_SCAN_ABORT_MS 1000 bool debug_fw; /* = false; */ -module_param(debug_fw, bool, S_IRUGO); +module_param(debug_fw, bool, 0444); MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug"); static bool oob_mode; -module_param(oob_mode, bool, S_IRUGO); +module_param(oob_mode, bool, 0444); MODULE_PARM_DESC(oob_mode, " enable out of the box (OOB) mode in FW, for diagnostics and certification"); bool no_fw_recovery; -module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); +module_param(no_fw_recovery, bool, 0644); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); /* if not set via modparam, will be set to default value of 1/8 of * rx ring size during init flow */ unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT; -module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO); +module_param(rx_ring_overflow_thrsh, ushort, 0444); MODULE_PARM_DESC(rx_ring_overflow_thrsh, " RX ring overflow threshold in descriptors."); @@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = { .get = param_get_uint, }; -module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO); +module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444); MODULE_PARM_DESC(mtu_max, " Max MTU value."); static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; @@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = { .get = param_get_uint, }; -module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO); +module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444); MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order"); -module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO); +module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444); MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order"); -module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO); +module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444); MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order"); #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ @@ -172,12 +172,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct wil_sta_info *sta = 
&wil->sta[cid]; might_sleep(); - wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, - sta->status); + wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n", + cid, sta->status); /* inform upper/lower layers */ if (sta->status != wil_sta_unused) { - if (!from_event) - wmi_disconnect_sta(wil, sta->addr, reason_code, true); + if (!from_event) { + bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ? + disable_ap_sme : false; + wmi_disconnect_sta(wil, sta->addr, reason_code, + true, del_sta); + } switch (wdev->iftype) { case NL80211_IFTYPE_AP: @@ -237,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, return; might_sleep(); - wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, + wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid, reason_code, from_event ? "+" : "-"); /* Cases are: @@ -347,7 +351,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil) void wil_set_recovery_state(struct wil6210_priv *wil, int state) { - wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__, + wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n", wil->recovery_state, state); wil->recovery_state = state; @@ -489,7 +493,7 @@ int wil_priv_init(struct wil6210_priv *wil) { uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_init\n"); memset(wil->sta, 0, sizeof(wil->sta)); for (i = 0; i < WIL6210_MAX_CID; i++) @@ -564,7 +568,7 @@ out_wmi_wq: void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "disconnect\n"); del_timer_sync(&wil->connect_timer); _wil6210_disconnect(wil, bssid, reason_code, from_event); @@ -572,7 +576,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, void wil_priv_deinit(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "priv_deinit\n"); wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); @@ -605,7 +609,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable) { - wil_info(wil, "%s: enable=%d\n", __func__, enable); + wil_info(wil, "enable=%d\n", enable); if (enable) wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); else @@ -861,7 +865,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "reset\n"); WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); @@ -884,9 +888,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_PRE_RESET); if (rc) - wil_err(wil, - "%s: PRE_RESET platform notify failed, rc %d\n", - __func__, rc); + wil_err(wil, "PRE_RESET platform notify failed, rc %d\n", + rc); } set_bit(wil_status_resetting, wil->status); @@ -915,7 +918,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wmi_wq); wil_bl_crash_info(wil, false); + wil_disable_irq(wil); rc = wil_target_reset(wil); + wil6210_clear_irq(wil); + wil_enable_irq(wil); wil_rx_fini(wil); if (rc) { wil_bl_crash_info(wil, true); @@ -930,16 +936,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_set_oob_mode(wil, oob_mode); if (load_fw) { - wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, - WIL_FW2_NAME); + wil_info(wil, "Use firmware <%s> + board <%s>\n", + wil->wil_fw_name, WIL_BOARD_FILE_NAME); wil_halt_cpu(wil); memset(wil->fw_version, 0, 
sizeof(wil->fw_version)); /* Loading f/w from the file */ - rc = wil_request_firmware(wil, WIL_FW_NAME, true); + rc = wil_request_firmware(wil, wil->wil_fw_name, true); if (rc) return rc; - rc = wil_request_firmware(wil, WIL_FW2_NAME, true); + rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true); if (rc) return rc; @@ -976,8 +982,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* check FW is responsive */ rc = wmi_echo(wil); if (rc) { - wil_err(wil, "%s: wmi_echo failed, rc %d\n", - __func__, rc); + wil_err(wil, "wmi_echo failed, rc %d\n", rc); return rc; } @@ -987,9 +992,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_FW_RDY); if (rc) { - wil_err(wil, - "%s: FW_RDY notify failed, rc %d\n", - __func__, rc); + wil_err(wil, "FW_RDY notify failed, rc %d\n", + rc); rc = 0; } } @@ -1073,7 +1077,7 @@ int wil_up(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "up\n"); mutex_lock(&wil->mutex); rc = __wil_up(wil); @@ -1113,7 +1117,7 @@ int wil_down(struct wil6210_priv *wil) { int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "down\n"); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -1146,25 +1150,24 @@ void wil_halp_vote(struct wil6210_priv *wil) mutex_lock(&wil->halp.lock); - wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); if (++wil->halp.ref_cnt == 1) { wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { - wil_err(wil, "%s: HALP vote timed out\n", __func__); + wil_err(wil, "HALP vote timed out\n"); /* Mask HALP as done in case the interrupt is raised */ wil6210_mask_halp(wil); } else { wil_dbg_irq(wil, - "%s: HALP vote completed after %d ms\n", - __func__, + "halp_vote: HALP vote completed after %d ms\n", jiffies_to_msecs(to_jiffies - rc)); } } - wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); mutex_unlock(&wil->halp.lock); @@ -1176,15 +1179,15 @@ void wil_halp_unvote(struct wil6210_priv *wil) mutex_lock(&wil->halp.lock); - wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); if (--wil->halp.ref_cnt == 0) { wil6210_clear_halp(wil); - wil_dbg_irq(wil, "%s: HALP unvote\n", __func__); + wil_dbg_irq(wil, "HALP unvote\n"); } - wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__, + wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n", wil->halp.ref_cnt); mutex_unlock(&wil->halp.lock); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 6676001dcbca..708facd5f667 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,10 +22,11 @@ static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "open\n"); - if (debug_fw) { - wil_err(wil, "%s() while in debug_fw mode\n", __func__); + if (debug_fw || + test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) { + wil_err(wil, "while in debug_fw or wmi_only mode\n"); return -EINVAL; } @@ -36,7 +37,7 @@ static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "stop\n"); return wil_down(wil); } @@ -68,7 +69,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) done = budget - quota; if (done < budget) { - napi_complete(napi); + napi_complete_done(napi, done); wil6210_unmask_irq_rx(wil); wil_dbg_txrx(wil, "NAPI RX complete\n"); } @@ -132,7 +133,7 @@ void *wil_if_alloc(struct device *dev) wil->wdev = wdev; wil->radio_wdev = wdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_alloc\n"); rc = wil_priv_init(wil); if (rc) { @@ -179,7 +180,7 @@ void wil_if_free(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_free\n"); if (!ndev) return; @@ -234,7 +235,7 @@ void wil_if_remove(struct wil6210_priv *wil) struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil_to_wdev(wil); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_remove\n"); unregister_netdev(ndev); wiphy_unregister(wdev->wiphy); diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index fbae99525e01..792484756654 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; - wil_dbg_misc(wil, "%s\n", __func__); + wil_dbg_misc(wil, "p2p_discovery_timer_fn\n"); schedule_work(&wil->p2p.discovery_expired_work); } @@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil, int rc; struct wil_p2p_info *p2p = &wil->p2p; - wil_dbg_misc(wil, "%s: channel %d\n", - __func__, P2P_DMG_SOCIAL_CHANNEL); + wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL); lockdep_assert_held(&wil->mutex); if (p2p->discovery_started) { - wil_err(wil, "%s: search failed. discovery already ongoing\n", - __func__); + wil_err(wil, "search failed. 
discovery already ongoing\n"); rc = -EBUSY; goto out; } rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI); if (rc) { - wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__); + wil_err(wil, "wmi_p2p_cfg failed\n"); goto out; } rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID); if (rc) { - wil_err(wil, "%s: wmi_set_ssid failed\n", __func__); + wil_err(wil, "wmi_set_ssid failed\n"); goto out_stop; } @@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil, rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); if (rc) { - wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n", - __func__); + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n"); goto out_stop; } @@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil, rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, request->ie_len, request->ie); if (rc) { - wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n", - __func__); + wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n"); goto out_stop; } rc = wmi_start_search(wil); if (rc) { - wil_err(wil, "%s: wmi_start_search failed\n", __func__); + wil_err(wil, "wmi_start_search failed\n"); goto out_stop; } @@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev, if (!chan) return -EINVAL; - wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); + wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration); mutex_lock(&wil->mutex); if (p2p->discovery_started) { - wil_err(wil, "%s: discovery already ongoing\n", __func__); + wil_err(wil, "discovery already ongoing\n"); rc = -EBUSY; goto out; } @@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) mutex_lock(&wil->mutex); if (cookie != p2p->cookie) { - wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n", - __func__, p2p->cookie, cookie); + wil_info(wil, "Cookie mismatch: 0x%016llx vs. 
0x%016llx\n", + p2p->cookie, cookie); mutex_unlock(&wil->mutex); return -ENOENT; } @@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie) mutex_unlock(&wil->mutex); if (!started) { - wil_err(wil, "%s: listen not started\n", __func__); + wil_err(wil, "listen not started\n"); return -ENOENT; } @@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work) struct wil6210_priv, p2p); u8 started; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "p2p_listen_expired\n"); mutex_lock(&wil->mutex); started = wil_p2p_stop_discovery(wil); @@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work) struct wil6210_priv, p2p); u8 started; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "p2p_search_expired\n"); mutex_lock(&wil->mutex); started = wil_p2p_stop_discovery(wil); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 44746ca0d2e6..874c787727fe 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -23,7 +23,7 @@ #include <linux/rtnetlink.h> static bool use_msi = true; -module_param(use_msi, bool, S_IRUGO); +module_param(use_msi, bool, 0444); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); #ifdef CONFIG_PM @@ -36,18 +36,38 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, static void wil_set_capabilities(struct wil6210_priv *wil) { - u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); + u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) & + RGF_USER_REVISION_ID_MASK); bitmap_zero(wil->hw_capabilities, hw_capability_last); bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); - - switch (rev_id) { - case JTAG_DEV_ID_SPARROW_B0: - wil->hw_name = "Sparrow B0"; - wil->hw_version = HW_VER_SPARROW_B0; + wil->wil_fw_name = WIL_FW_NAME_DEFAULT; + wil->chip_revision = chip_revision; + + switch (jtag_id) { + case JTAG_DEV_ID_SPARROW: + switch (chip_revision) { + case REVISION_ID_SPARROW_D0: + wil->hw_name = "Sparrow D0"; + wil->hw_version = HW_VER_SPARROW_D0; + if (wil_fw_verify_file_exists(wil, + WIL_FW_NAME_SPARROW_PLUS)) + wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS; + break; + case REVISION_ID_SPARROW_B0: + wil->hw_name = "Sparrow B0"; + wil->hw_version = HW_VER_SPARROW_B0; + break; + default: + wil->hw_name = "Unknown"; + wil->hw_version = HW_VER_UNKNOWN; + break; + } break; default: - wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id); + wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", + jtag_id, chip_revision); wil->hw_name = "Unknown"; wil->hw_version = HW_VER_UNKNOWN; } @@ -55,7 +75,7 @@ void wil_set_capabilities(struct wil6210_priv *wil) wil_info(wil, "Board hardware is %s\n", wil->hw_name); /* extract FW capabilities from file without loading the FW */ - wil_request_firmware(wil, WIL_FW_NAME, false); + wil_request_firmware(wil, wil->wil_fw_name, false); } void wil_disable_irq(struct wil6210_priv *wil) @@ -79,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) */ int msi_only = pdev->msi_enabled; bool _use_msi = use_msi; + bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, + wil->fw_capabilities); - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only); pdev->msi_enabled = 0; @@ -103,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) if (rc) goto stop_master; - /* need reset here to obtain MAC */ + /* need 
reset here to obtain MAC or in case of WMI-only FW, full reset + * and fw loading takes place + */ mutex_lock(&wil->mutex); - rc = wil_reset(wil, false); + rc = wil_reset(wil, wmi_only); mutex_unlock(&wil->mutex); if (rc) goto release_irq; @@ -125,7 +149,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) { struct pci_dev *pdev = wil->pdev; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "if_pcie_disable\n"); pci_clear_master(pdev); /* disable and release IRQ */ @@ -289,7 +313,7 @@ static void wil_pcie_remove(struct pci_dev *pdev) struct wil6210_priv *wil = pci_get_drvdata(pdev); void __iomem *csr = wil->csr; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "pcie_remove\n"); #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP @@ -327,8 +351,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); rc = wil_can_suspend(wil, is_runtime); if (rc) @@ -354,8 +377,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime) struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); /* allow master */ pci_set_master(pdev); @@ -375,7 +397,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, int rc = 0; enum wil_platform_event evt; - wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode); + wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode); switch (mode) { case PM_HIBERNATION_PREPARE: diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 11ee24d509e5..a0acb2d0cb79 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct wireless_dev *wdev = wil->wdev; - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); if (!netif_running(wil_to_ndev(wil))) { /* can always sleep when down */ @@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) } out: - wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, + wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n", is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); return rc; @@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { @@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) rc = wil->platform_ops.suspend(wil->platform_handle); out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? 
"runtime" : "system", rc); return rc; } @@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) int rc = 0; struct net_device *ndev = wil_to_ndev(wil); - wil_dbg_pm(wil, "%s(%s)\n", __func__, - is_runtime ? "runtime" : "system"); + wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); if (wil->platform_ops.resume) { rc = wil->platform_ops.resume(wil->platform_handle); @@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) rc = wil_up(wil); out: - wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system", rc); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index b9faae0278c9..3ff4f4ce9fef 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, if (wil_is_pmc_allocated(pmc)) { /* sanity check */ - wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__); + wil_err(wil, "ERROR pmc is already allocated\n"); goto no_release_err; } if ((num_descriptors <= 0) || (descriptor_size <= 0)) { @@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil, pmc->num_descriptors = num_descriptors; pmc->descriptor_size = descriptor_size; - wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n", - __func__, num_descriptors, descriptor_size); + wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n", + num_descriptors, descriptor_size); /* allocate descriptors info list in pmc context*/ pmc->descriptors = kcalloc(num_descriptors, sizeof(struct desc_alloc_info), GFP_KERNEL); if (!pmc->descriptors) { - wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__); + wil_err(wil, "ERROR allocating pmc skb list\n"); goto no_release_err; } - wil_dbg_misc(wil, - "%s: allocated descriptors info list %p\n", - __func__, pmc->descriptors); + wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n", + pmc->descriptors); /* Allocate pring buffer and descriptors. * vring->va should be aligned on its size rounded up to power of 2 @@ -116,15 +115,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); wil_dbg_misc(wil, - "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", - __func__, + "pmc_alloc: allocated pring %p => %pad. 
%zd x %d = total %zd bytes\n", pmc->pring_va, &pmc->pring_pa, sizeof(struct vring_tx_desc), num_descriptors, sizeof(struct vring_tx_desc) * num_descriptors); if (!pmc->pring_va) { - wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__); + wil_err(wil, "ERROR allocating pmc pring\n"); goto release_pmc_skb_list; } @@ -143,9 +141,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); if (unlikely(!pmc->descriptors[i].va)) { - wil_err(wil, - "%s: ERROR allocating pmc descriptor %d", - __func__, i); + wil_err(wil, "ERROR allocating pmc descriptor %d", i); goto release_pmc_skbs; } @@ -165,21 +161,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil, *_d = *d; } - wil_dbg_misc(wil, "%s: allocated successfully\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n"); pmc_cmd.op = WMI_PMC_ALLOCATE; pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors); pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa); - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__); + wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n"); pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with ALLOCATE op failed with status %d", + pmc->last_cmd_status); goto release_pmc_skbs; } @@ -188,7 +184,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, return; release_pmc_skbs: - wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__); + wil_err(wil, "exit on error: Releasing skbs...\n"); for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) { dma_free_coherent(dev, descriptor_size, @@ -197,7 +193,7 @@ release_pmc_skbs: pmc->descriptors[i].va = NULL; } - wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__); + wil_err(wil, "exit on error: Releasing pring...\n"); dma_free_coherent(dev, sizeof(struct vring_tx_desc) * num_descriptors, @@ -207,8 +203,7 @@ release_pmc_skbs: pmc->pring_va = NULL; release_pmc_skb_list: - wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n", - __func__); + wil_err(wil, "exit on error: Releasing descriptors info list...\n"); kfree(pmc->descriptors); pmc->descriptors = NULL; @@ -232,24 +227,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) pmc->last_cmd_status = 0; if (!wil_is_pmc_allocated(pmc)) { - wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n", - __func__); + wil_dbg_misc(wil, + "pmc_free: Error, can't free - not allocated\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return; } if (send_pmc_cmd) { - wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n", - __func__); + wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n"); pmc_cmd.op = WMI_PMC_RELEASE; pmc->last_cmd_status = wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, sizeof(pmc_cmd)); if (pmc->last_cmd_status) { wil_err(wil, - "%s WMI_PMC_CMD with RELEASE op failed, status %d", - __func__, pmc->last_cmd_status); + "WMI_PMC_CMD with RELEASE op failed, status %d", + pmc->last_cmd_status); /* There's nothing we can do with this error. * Normally, it should never occur. * Continue to freeing all memory allocated for pmc. 
@@ -261,8 +255,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) size_t buf_size = sizeof(struct vring_tx_desc) * pmc->num_descriptors; - wil_dbg_misc(wil, "%s: free pring va %p\n", - __func__, pmc->pring_va); + wil_dbg_misc(wil, "pmc_free: free pring va %p\n", + pmc->pring_va); dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa); pmc->pring_va = NULL; @@ -281,11 +275,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) pmc->descriptors[i].pa); pmc->descriptors[i].va = NULL; } - wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n", - __func__, i, pmc->num_descriptors); + wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i, + pmc->num_descriptors); wil_dbg_misc(wil, - "%s: free pmc descriptors info list %p\n", - __func__, pmc->descriptors); + "pmc_free: free pmc descriptors info list %p\n", + pmc->descriptors); kfree(pmc->descriptors); pmc->descriptors = NULL; } else { @@ -301,7 +295,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) */ int wil_pmc_last_cmd_status(struct wil6210_priv *wil) { - wil_dbg_misc(wil, "%s: status %d\n", __func__, + wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n", wil->pmc.last_cmd_status); return wil->pmc.last_cmd_status; @@ -324,7 +318,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, mutex_lock(&pmc->lock); if (!wil_is_pmc_allocated(pmc)) { - wil_err(wil, "%s: error, pmc is not allocated!\n", __func__); + wil_err(wil, "error, pmc is not allocated!\n"); pmc->last_cmd_status = -EPERM; mutex_unlock(&pmc->lock); return -EPERM; @@ -333,8 +327,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, pmc_size = pmc->descriptor_size * pmc->num_descriptors; wil_dbg_misc(wil, - "%s: size %u, pos %lld\n", - __func__, (unsigned)count, *f_pos); + "pmc_read: size %u, pos %lld\n", + (u32)count, *f_pos); pmc->last_cmd_status = 0; @@ -343,15 +337,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, offset = *f_pos - (idx * pmc->descriptor_size); if (*f_pos >= pmc_size) { - wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n", - __func__, *f_pos, (unsigned)pmc_size); + wil_dbg_misc(wil, + "pmc_read: reached end of pmc buf: %lld >= %u\n", + *f_pos, (u32)pmc_size); pmc->last_cmd_status = -ERANGE; goto out; } wil_dbg_misc(wil, - "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", - __func__, *f_pos, idx, offset, count); + "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", + *f_pos, idx, offset, count); /* if no errors, return the copied byte count */ retval = simple_read_from_buffer(buf, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 19ed127d4d05..7404b6f39c6a 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); if (rc || (status != WLAN_STATUS_SUCCESS)) { - wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n", - __func__, rc, status); + wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, + status); goto out; } @@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) txdata->addba_in_progress = true; rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout); if (rc) { - wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc); + wil_err(wil, "wmi_addba failed, rc (%d)", rc); txdata->addba_in_progress = false; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index c1b4bb03e997..072182e527e6 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -29,12 +29,12 @@ #include "trace.h" static bool rtap_include_phy_info; -module_param(rtap_include_phy_info, bool, S_IRUGO); +module_param(rtap_include_phy_info, bool, 0444); MODULE_PARM_DESC(rtap_include_phy_info, " Include PHY info in the radiotap header, default - no"); bool rx_align_2; -module_param(rx_align_2, bool, S_IRUGO); +module_param(rx_align_2, bool, 0444); MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); static inline uint wil_rx_snaplen(void) @@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) size_t sz = vring->size * sizeof(vring->va[0]); uint i; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "vring_alloc:\n"); BUILD_BUG_ON(sizeof(vring->va[0]) != 32); @@ -745,7 +745,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); return; } - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "rx_handle\n"); while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { (*quota)--; @@ -768,7 +768,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) struct vring *vring = &wil->vring_rx; int rc; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_init\n"); if (vring->va) { wil_err(wil, "Rx ring already allocated\n"); @@ -799,7 +799,7 @@ void wil_rx_fini(struct wil6210_priv *wil) { struct vring *vring = &wil->vring_rx; - wil_dbg_misc(wil, "%s()\n", __func__); + wil_dbg_misc(wil, "rx_fini\n"); if (vring->va) wil_vring_free(wil, vring, 0); @@ -851,7 +851,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); lockdep_assert_held(&wil->mutex); @@ -931,7 +931,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; - wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + wil_dbg_misc(wil, 
"vring_init_bcast: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); lockdep_assert_held(&wil->mutex); @@ -993,7 +993,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) if (!vring->va) return; - wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); + wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id); spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; @@ -1032,12 +1032,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, struct vring *v = &wil->vring_tx[i]; struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", - __func__, eth->h_dest, i); + wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", + eth->h_dest, i); if (v->va && txdata->enabled) { return v; } else { - wil_dbg_txrx(wil, "vring[%d] not valid\n", i); + wil_dbg_txrx(wil, + "find_tx_ucast: vring[%d] not valid\n", + i); return NULL; } } @@ -1193,17 +1195,6 @@ found: return v; } -static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, - struct sk_buff *skb) -{ - struct wireless_dev *wdev = wil->wdev; - - if (wdev->iftype != NL80211_IFTYPE_AP) - return wil_find_tx_bcast_2(wil, skb); - - return wil_find_tx_bcast_1(wil, skb); -} - static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, int vring_index) { @@ -1373,8 +1364,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, int gso_type; int rc = -EINVAL; - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1643,8 +1634,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, bool mcast = (vring_index == wil->bcast_vring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", - __func__, skb->len, vring_index); + wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, + vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1884,7 +1875,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) static bool pr_once_fw; int rc; - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "start_xmit\n"); if (unlikely(!test_bit(wil_status_fwready, wil->status))) { if (!pr_once_fw) { wil_err(wil, "FW not ready\n"); @@ -1903,12 +1894,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) pr_once_fw = false; /* find vring */ - if (wil->wdev->iftype == NL80211_IFTYPE_STATION) { - /* in STA mode (ESS), all to same VRING */ + if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) { + /* in STA mode (ESS), all to same VRING (to AP) */ vring = wil_find_tx_vring_sta(wil, skb); - } else { /* direct communication, find matching VRING */ - vring = bcast ? wil_find_tx_bcast(wil, skb) : - wil_find_tx_ucast(wil, skb); + } else if (bcast) { + if (wil->pbss) + /* in pbss, no bcast VRING - duplicate skb in + * all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + else if (wil->wdev->iftype == NL80211_IFTYPE_AP) + /* AP has a dedicated bcast VRING */ + vring = wil_find_tx_bcast_1(wil, skb); + else + /* unexpected combination, fallback to duplicating + * the skb in all stations VRINGs + */ + vring = wil_find_tx_bcast_2(wil, skb); + } else { + /* unicast, find specific VRING by dest. 
address */ + vring = wil_find_tx_ucast(wil, skb); } if (unlikely(!vring)) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); @@ -1982,7 +1987,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) return 0; } - wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); + wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); used_before_complete = wil_vring_used_tx(vring); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 237e1666df2d..085a2dbfa21d 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -33,10 +33,12 @@ extern int agg_wsize; extern u32 vring_idle_trsh; extern bool rx_align_2; extern bool debug_fw; +extern bool disable_ap_sme; #define WIL_NAME "wil6210" -#define WIL_FW_NAME "wil6210.fw" /* code */ -#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */ +#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */ +#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */ +#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ @@ -98,6 +100,9 @@ static inline u32 wil_mtu2macbuf(u32 mtu) #define WIL6210_RX_HIGH_TRSH_INIT (0) #define WIL6210_RX_HIGH_TRSH_DEFAULT \ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3)) +#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see + * 802.11REVmc/D5.0, section 9.4.1.8) + */ /* Hardware definitions begin */ /* @@ -249,7 +254,12 @@ struct RGF_ICR { #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ - #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f) + #define JTAG_DEV_ID_SPARROW (0x2632072f) + +#define RGF_USER_REVISION_ID (0x88afe4) +#define RGF_USER_REVISION_ID_MASK (3) + #define REVISION_ID_SPARROW_B0 (0x0) + #define REVISION_ID_SPARROW_D0 (0x3) /* crash codes for FW/Ucode stored here */ #define RGF_FW_ASSERT_CODE (0x91f020) @@ -257,7 +267,8 @@ struct RGF_ICR { enum { HW_VER_UNKNOWN, - HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */ + HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ + HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ }; /* popular locations */ @@ -512,6 +523,7 @@ struct wil_sta_info { unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM]; struct wil_tid_crypto_rx group_crypto_rx; + u8 aid; /* 1-254; 0 if unknown/not reported */ }; enum { @@ -583,7 +595,9 @@ struct wil6210_priv { DECLARE_BITMAP(status, wil_status_last); u8 fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; + u8 chip_revision; const char *hw_name; + const char *wil_fw_name; DECLARE_BITMAP(hw_capabilities, hw_capability_last); DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX); u8 n_mids; /* number of additional MIDs as reported by FW */ @@ -653,6 +667,7 @@ struct wil6210_priv { struct dentry *debug; struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; u8 discovery_mode; + u8 abft_len; void *platform_handle; struct wil_platform_ops platform_ops; @@ -816,8 +831,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, - bool full_disconnect); +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool 
full_disconnect, bool del_sta); int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); @@ -827,6 +842,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile); int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short); int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short); +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid); int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); @@ -918,6 +934,7 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); int wil_request_firmware(struct wil6210_priv *wil, const char *name, bool load); +bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name); int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); int wil_suspend(struct wil6210_priv *wil, bool is_runtime); diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index d051eea47a54..e53cf0cf7031 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) u32 host_min, dump_size, offset, len; if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) { - wil_err(wil, "%s: fail to obtain crash dump size\n", __func__); + wil_err(wil, "fail to obtain crash dump size\n"); return -EINVAL; } if (dump_size > size) { - wil_err(wil, "%s: not enough space for dump. Need %d have %d\n", - __func__, dump_size, size); + wil_err(wil, "not enough space for dump. Need %d have %d\n", + dump_size, size); return -EINVAL; } @@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) len = map->to - map->from; offset = map->host - host_min; - wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n", - __func__, fw_mapping[i].name, len, offset); + wil_dbg_misc(wil, + "fw_copy_crash_dump: - dump %s, size %d, offset %d\n", + fw_mapping[i].name, len, offset); wil_memcpy_fromio_32((void * __force)(dest + offset), (const void __iomem * __force)data, len); @@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil) u32 fw_dump_size; if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) { - wil_err(wil, "%s: fail to get fw dump size\n", __func__); + wil_err(wil, "fail to get fw dump size\n"); return; } @@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil) * after 5 min */ dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL); - wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__, - fw_dump_size); + wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size); } diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 7585003bef67..1f22c19696b1 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,16 +24,16 @@ #include "trace.h" static uint max_assoc_sta = WIL6210_MAX_CID; -module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); +module_param(max_assoc_sta, uint, 0644); MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); int agg_wsize; /* = 0; */ -module_param(agg_wsize, int, S_IRUGO | S_IWUSR); +module_param(agg_wsize, int, 0644); MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;" " 0 - use default; < 0 - don't auto-establish"); u8 led_id = WIL_LED_INVALID_ID; -module_param(led_id, byte, S_IRUGO); +module_param(led_id, byte, 0444); MODULE_PARM_DESC(led_id, " 60G device led enablement. Set the led ID (0-2) to enable"); @@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } ch = evt->channel + 1; - wil_info(wil, "Connect %pM channel [%d] cid %d\n", - evt->bssid, ch, evt->cid); + wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n", + evt->bssid, ch, evt->cid, evt->aid); wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); @@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (wil->sta[evt->cid].status != wil_sta_unused) { - wil_err(wil, "%s: AP: Invalid status %d for CID %d\n", - __func__, wil->sta[evt->cid].status, evt->cid); + wil_err(wil, "AP: Invalid status %d for CID %d\n", + wil->sta[evt->cid].status, evt->cid); mutex_unlock(&wil->mutex); return; } @@ -553,22 +553,19 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) rc = wil_tx_init(wil, evt->cid); if (rc) { - wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n", - __func__, evt->cid, rc); + wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", + evt->cid, rc); wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, - WLAN_REASON_UNSPECIFIED, false); + WLAN_REASON_UNSPECIFIED, false, false); } else { - wil_info(wil, "%s: successful connection to CID %d\n", - __func__, evt->cid); + wil_info(wil, "successful connection to CID %d\n", evt->cid); } if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (rc) { netif_carrier_off(ndev); - wil_err(wil, - "%s: cfg80211_connect_result with failure\n", - __func__); + wil_err(wil, "cfg80211_connect_result with failure\n"); cfg80211_connect_result(ndev, evt->bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, @@ -583,8 +580,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { - if (rc) + if (rc) { + if (disable_ap_sme) + /* notify new_sta has failed */ + cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL); goto out; + } memset(&sinfo, 0, sizeof(sinfo)); @@ -597,12 +598,13 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); } else { - wil_err(wil, "%s: unhandled iftype %d for CID %d\n", - __func__, wdev->iftype, evt->cid); + wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype, + evt->cid); goto out; } wil->sta[evt->cid].status = wil_sta_connected; + wil->sta[evt->cid].aid = evt->aid; 
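/* [Editorial note] evt->aid arrives in the firmware connect event and is
 * cached per station just above; the wil6210.h hunk later in this patch
 * bounds DMG association IDs to 1-254 (WIL_MAX_DMG_AID, citing
 * 802.11REVmc/D5.0 section 9.4.1.8), with 0 meaning "unknown/not reported".
 * A sketch of the range check a consumer of sta->aid might apply
 * (wil_dmg_aid_valid is a hypothetical helper, not part of this patch):
 */
#include <linux/types.h>

static inline bool wil_dmg_aid_valid(u8 aid)
{
	return aid >= 1 && aid <= 254; /* 0 is reserved for "unknown" */
}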
set_bit(wil_status_fwconnected, wil->status); wil_update_net_queues_bh(wil, NULL, false); @@ -687,6 +689,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_vring_en_event *evt = d; u8 vri = evt->vring_index; + struct wireless_dev *wdev = wil_to_wdev(wil); wil_dbg_wmi(wil, "Enable vring %d\n", vri); @@ -694,7 +697,12 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) wil_err(wil, "Enable for invalid vring %d\n", vri); return; } - wil->vring_tx_data[vri].dot1x_open = true; + + if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme) + /* in AP mode with disable_ap_sme, this is done by + * wil_cfg80211_change_station() + */ + wil->vring_tx_data[vri].dot1x_open = true; if (vri == wil->bcast_vring) /* no BA for bcast */ return; if (agg_wsize >= 0) @@ -919,8 +927,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); if (immed_reply) { - wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", - __func__, wil->reply_id); + wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n", + wil->reply_id); kfree(evt); num_immed_reply++; complete(&wil->wmi_call); @@ -934,7 +942,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } } /* normally, 1 event per IRQ should be processed */ - wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__, + wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n", n - num_immed_reply, num_immed_reply); } @@ -950,6 +958,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, wil->reply_id = reply_id; wil->reply_buf = reply; wil->reply_size = reply_size; + reinit_completion(&wil->wmi_call); spin_unlock(&wil->wmi_ev_lock); rc = __wmi_send(wil, cmdid, buf, len); @@ -1069,6 +1078,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, + .disable_ap_sme = disable_ap_sme, + .abft_len = wil->abft_len, }; struct { struct wmi_cmd_hdr wmi; @@ -1086,6 +1097,13 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; } + if (disable_ap_sme && + !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + wil->fw_capabilities)) { + wil_err(wil, "disable_ap_sme not supported by FW\n"); + return -EOPNOTSUPP; + } + /* * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP @@ -1352,7 +1370,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) struct wmi_listen_started_event evt; } __packed reply; - wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off"); + wil_info(wil, "(%s)\n", on ? 
"on" : "off"); if (on) { rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, @@ -1456,12 +1474,15 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, - bool full_disconnect) +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, + u16 reason, bool full_disconnect, bool del_sta) { int rc; u16 reason_code; - struct wmi_disconnect_sta_cmd cmd = { + struct wmi_disconnect_sta_cmd disc_sta_cmd = { + .disconnect_reason = cpu_to_le16(reason), + }; + struct wmi_del_sta_cmd del_sta_cmd = { .disconnect_reason = cpu_to_le16(reason), }; struct { @@ -1469,12 +1490,19 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, struct wmi_disconnect_event evt; } __packed reply; - ether_addr_copy(cmd.dst_mac, mac); - - wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); + wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); - rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd), - WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); + if (del_sta) { + ether_addr_copy(del_sta_cmd.dst_mac, mac); + rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, + sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } else { + ether_addr_copy(disc_sta_cmd.dst_mac, mac); + rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd, + sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID, + &reply, sizeof(reply), 1000); + } /* failure to disconnect in reasonable time treated as FW error */ if (rc) { wil_fw_error_recovery(wil); @@ -1507,8 +1535,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout) .amsdu = 0, }; - wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__, - ringid, size, timeout); + wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, + timeout); return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd)); } @@ -1520,8 +1548,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__, - ringid, reason); + wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd)); } @@ -1533,8 +1560,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason) .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__, - cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason); + wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, + (cidxtid >> 4) & 0xf, reason); return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd)); } @@ -1686,11 +1713,29 @@ int wmi_abort_scan(struct wil6210_priv *wil) return rc; } +int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) +{ + int rc; + struct wmi_new_sta_cmd cmd = { + .aid = aid, + }; + + wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid); + + ether_addr_copy(cmd.dst_mac, mac); + + rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd)); + if (rc) + wil_err(wil, "Failed to send new sta (%d)\n", rc); + + return rc; +} + void wmi_event_flush(struct wil6210_priv *wil) { struct pending_wmi_event *evt, *t; - wil_dbg_wmi(wil, "%s()\n", __func__); + wil_dbg_wmi(wil, "event_flush\n"); list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); @@ -1731,8 +1776,8 @@ static void wmi_event_handle(struct wil6210_priv *wil, WARN_ON(wil->reply_buf); wmi_evt_call_handler(wil, id, evt_data, len - 
sizeof(*wmi)); - wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", - __func__, id); + wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n", + id); complete(&wil->wmi_call); return; } @@ -1779,11 +1824,11 @@ void wmi_event_worker(struct work_struct *work) struct pending_wmi_event *evt; struct list_head *lh; - wil_dbg_wmi(wil, "Start %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Start\n"); while ((lh = next_wmi_ev(wil)) != NULL) { evt = list_entry(lh, struct pending_wmi_event, list); wmi_event_handle(wil, &evt->event.hdr); kfree(evt); } - wil_dbg_wmi(wil, "Finished %s\n", __func__); + wil_dbg_wmi(wil, "event_worker: Finished\n"); } diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index d93a4d490d24..7c9fee57aa91 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity * * Permission to use, copy, modify, and/or distribute this software for any @@ -56,6 +56,8 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PS_CONFIG = 1, WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, + WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_MAX, }; @@ -185,8 +187,11 @@ enum wmi_command_id { WMI_RS_CFG_CMDID = 0x921, WMI_GET_DETAILED_RS_RES_CMDID = 0x922, WMI_AOA_MEAS_CMDID = 0x923, + WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924, WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930, WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931, + WMI_NEW_STA_CMDID = 0x935, + WMI_DEL_STA_CMDID = 0x936, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -543,7 +548,10 @@ struct wmi_pcp_start_cmd { u8 pcp_max_assoc_sta; u8 hidden_ssid; u8 is_go; - u8 reserved0[7]; + u8 reserved0[5]; + /* abft_len override if non-0 */ + u8 abft_len; + u8 disable_ap_sme; u8 network_type; u8 channel; u8 disable_sec_offload; @@ -902,6 +910,18 @@ struct wmi_set_mgmt_retry_limit_cmd { u8 reserved[3]; } __packed; +/* WMI_NEW_STA_CMDID */ +struct wmi_new_sta_cmd { + u8 dst_mac[WMI_MAC_LEN]; + u8 aid; +} __packed; + +/* WMI_DEL_STA_CMDID */ +struct wmi_del_sta_cmd { + u8 dst_mac[WMI_MAC_LEN]; + __le16 disconnect_reason; +} __packed; + enum wmi_tof_burst_duration { WMI_TOF_BURST_DURATION_250_USEC = 2, WMI_TOF_BURST_DURATION_500_USEC = 3, @@ -1067,6 +1087,7 @@ enum wmi_event_id { WMI_RS_CFG_DONE_EVENTID = 0x1921, WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, WMI_AOA_MEAS_EVENTID = 0x1923, + WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924, WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930, WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, WMI_TOF_SESSION_END_EVENTID = 0x1991, @@ -1287,12 +1308,13 @@ struct wmi_connect_event { u8 assoc_req_len; u8 assoc_resp_len; u8 cid; - u8 reserved2[3]; + u8 aid; + u8 reserved2[2]; /* not in use */ u8 assoc_info[0]; } __packed; -/* WMI_DISCONNECT_EVENTID */ +/* disconnect_reason */ enum wmi_disconnect_reason { WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01, /* bmiss */ @@ -1310,6 +1332,7 @@ enum wmi_disconnect_reason { WMI_DIS_REASON_IBSS_MERGE = 0x0E, }; +/* WMI_DISCONNECT_EVENTID */ struct wmi_disconnect_event { /* reason code, see 802.11 spec. 
*/ __le16 protocol_reason_status; @@ -1759,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event { u8 reserved[3]; } __packed; +/* BRP antenna limit mode */ +enum wmi_brp_ant_limit_mode { + /* Disable BRP force antenna limit */ + WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00, + /* Define maximal antennas limit. Only effective antennas will be + * actually used + */ + WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01, + /* Force a specific number of antennas */ + WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02, + /* number of BRP antenna limit modes */ + WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03, +}; + +/* WMI_BRP_SET_ANT_LIMIT_CMDID */ +struct wmi_brp_set_ant_limit_cmd { + /* connection id */ + u8 cid; + /* enum wmi_brp_ant_limit_mode */ + u8 limit_mode; + /* antenna limit count, 1-27 + * disable_mode - ignored + * effective_mode - upper limit to number of antennas to be used + * force_mode - exact number of antennas to be used + */ + u8 ant_limit; + u8 reserved; +} __packed; + +/* WMI_BRP_SET_ANT_LIMIT_EVENTID */ +struct wmi_brp_set_ant_limit_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* broadcast connection ID */ #define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 72139b579b18..5bc2ba214735 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1104,6 +1104,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356), { /* end: all zeroes */ } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index e21f7600122b..76693df34742 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -218,9 +218,6 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len) * interface functions from common layer */ -bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, - int prec); - /* Receive frame for delivery to OS. Callee disposes of rxp. */ void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event); /* Receive async event packet from firmware. Callee disposes of rxp. 
*/ @@ -241,13 +238,12 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); -int brcmf_bus_start(struct device *dev); +int brcmf_bus_started(struct device *dev); s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len); void brcmf_bus_add_txhdrlen(struct device *dev, uint len); #ifdef CONFIG_BRCMFMAC_SDIO void brcmf_sdio_exit(void); -void brcmf_sdio_init(void); void brcmf_sdio_register(void); #endif #ifdef CONFIG_BRCMFMAC_USB diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7ffc4aba5bab..0e28d0710af5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -138,7 +138,6 @@ static struct ieee80211_rate __wl_rates[] = { .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ - .flags = IEEE80211_CHAN_DISABLED, \ .max_antenna_gain = 0, \ .max_power = 30, \ } @@ -147,7 +146,6 @@ static struct ieee80211_rate __wl_rates[] = { .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ - .flags = IEEE80211_CHAN_DISABLED, \ .max_antenna_gain = 0, \ .max_power = 30, \ } @@ -328,7 +326,7 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, * triples, returning a pointer to the substring whose first element * matches tag */ -const struct brcmf_tlv * +static const struct brcmf_tlv * brcmf_parse_tlvs(const void *buf, int buflen, uint key) { const struct brcmf_tlv *elt = buf; @@ -3332,7 +3330,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, goto out_err; } - data += sizeof(struct brcmf_pno_scanresults_le); netinfo_start = brcmf_get_netinfo_array(pfn_result); for (i = 0; i < result_count; i++) { @@ -3480,8 +3477,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, return -EINVAL; } - data += sizeof(struct brcmf_pno_scanresults_le); - netinfo = (struct brcmf_pno_net_info_le *)data; + netinfo = brcmf_get_netinfo_array(pfn_result); memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; cfg->wowl.nd->n_channels = 1; @@ -3971,7 +3967,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, pval |= AES_ENABLED; break; default: - brcmf_err("Ivalid unicast security info\n"); + brcmf_err("Invalid unicast security info\n"); } offset++; } @@ -4015,7 +4011,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, wpa_auth |= WPA2_AUTH_1X_SHA256; break; default: - brcmf_err("Ivalid key mgmt info\n"); + brcmf_err("Invalid key mgmt info\n"); } offset++; } @@ -5071,6 +5067,29 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, return ret; } +static int +brcmf_cfg80211_update_conn_params(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_connect_params *sme, + u32 changed) +{ + struct brcmf_if *ifp; + int err; + + if (!(changed & UPDATE_ASSOC_IES)) + return 0; + + ifp = netdev_priv(ndev); + err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG, + sme->ie, sme->ie_len); + if (err) + brcmf_err("Set Assoc REQ IE Failed\n"); + else + brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); + + return err; +} + #ifdef CONFIG_PM static int brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, @@ -5138,6 +5157,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { 
.crit_proto_start = brcmf_cfg80211_crit_proto_start, .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, .tdls_oper = brcmf_cfg80211_tdls_oper, + .update_connect_params = brcmf_cfg80211_update_conn_params, }; struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, @@ -5825,7 +5845,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 i, j; u32 total; u32 chaninfo; - u32 index; pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); @@ -5873,33 +5892,36 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw == BRCMU_CHAN_BW_80) continue; - channel = band->channels; - index = band->n_channels; + channel = NULL; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.control_ch_num) { - index = j; + if (band->channels[j].hw_value == ch.control_ch_num) { + channel = &band->channels[j]; break; } } - channel[index].center_freq = - ieee80211_channel_to_frequency(ch.control_ch_num, - band->band); - channel[index].hw_value = ch.control_ch_num; + if (!channel) { + /* It seems firmware supports some channel we never + * considered. Something new in IEEE standard? + */ + brcmf_err("Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); + continue; + } /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ if (ch.bw == BRCMU_CHAN_BW_80) { - channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ; + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; } else if (ch.bw == BRCMU_CHAN_BW_40) { - brcmf_update_bw40_channel_flag(&channel[index], &ch); + brcmf_update_bw40_channel_flag(channel, &ch); } else { /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. */ - channel[index].flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + channel->flags = IEEE80211_CHAN_NO_HT40 | + IEEE80211_CHAN_NO_80MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; @@ -5907,11 +5929,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) - channel[index].flags |= + channel->flags |= (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR); if (chaninfo & WL_CHAN_PASSIVE) - channel[index].flags |= + channel->flags |= IEEE80211_CHAN_NO_IR; } } @@ -6341,7 +6363,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) } #ifdef CONFIG_PM -static struct wiphy_wowlan_support brcmf_wowlan_support = { +static const struct wiphy_wowlan_support brcmf_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, .n_patterns = BRCMF_WOWL_MAXPATTERNS, .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE, @@ -6354,19 +6376,29 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) { #ifdef CONFIG_PM struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct wiphy_wowlan_support *wowl; + + wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support), + GFP_KERNEL); + if (!wowl) { + brcmf_err("only support basic wowlan features\n"); + wiphy->wowlan = &brcmf_wowlan_support; + return; + } if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) { - brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + wowl->flags |= WIPHY_WOWLAN_NET_DETECT; + wowl->max_nd_match_sets = BRCMF_PNO_MAX_PFN_COUNT; init_waitqueue_head(&cfg->wowl.nd_data_wait); } } if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) { - brcmf_wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; - 
brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; + wowl->flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; + wowl->flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; } - wiphy->wowlan = &brcmf_wowlan_support; + wiphy->wowlan = wowl; #endif } @@ -6477,8 +6509,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->bands[NL80211_BAND_5GHZ] = band; } } - err = brcmf_setup_wiphybands(wiphy); - return err; + return 0; } static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) @@ -6748,6 +6779,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy) kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels); kfree(wiphy->bands[NL80211_BAND_5GHZ]); } +#if IS_ENABLED(CONFIG_PM) + if (wiphy->wowlan != &brcmf_wowlan_support) + kfree(wiphy->wowlan); +#endif wiphy_free(wiphy); } @@ -6843,6 +6878,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, goto priv_out; } + err = brcmf_setup_wiphybands(wiphy); + if (err) { + brcmf_err("Setting wiphy bands failed (%d)\n", err); + goto wiphy_unreg_out; + } + /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(), * setup 40MHz in 2GHz band and enable OBSS scanning. */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 0c9a7081fca9..8f19d95d4175 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -396,8 +396,6 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len); s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif); -const struct brcmf_tlv * -brcmf_parse_tlvs(const void *buf, int buflen, uint key); u16 channel_to_chanspec(struct brcmu_d11inf *d11inf, struct ieee80211_channel *ch); bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 3e15d64c6481..f7c8c2e80349 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -74,7 +74,7 @@ module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine"); #ifdef DEBUG -/* always succeed brcmf_bus_start() */ +/* always succeed brcmf_bus_started() */ static int brcmf_ignore_probe_fail; module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0); MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); @@ -299,11 +299,9 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, } } } - if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) { - /* No platform data for this device. In case of SDIO try OF - * (Open Firwmare) Device Tree. 
- */ - brcmf_of_probe(dev, &settings->bus.sdio); + if (!found) { + /* No platform data for this device, try OF (Open Firmware) */ + brcmf_of_probe(dev, bus_type, settings); + } return settings; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index bd095abca393..a62f8e70b320 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -65,6 +65,8 @@ struct brcmf_mp_device { } bus; }; +void brcmf_c_set_joinpref_default(struct brcmf_if *ifp); + struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, enum brcmf_bus_type bus_type, u32 chip, u32 chiprev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 9e6f60a0ec3e..b73a55b00fa7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -966,7 +966,7 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data) return 0; } -int brcmf_bus_start(struct device *dev) +int brcmf_bus_started(struct device *dev) { int ret = -1; struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -1075,16 +1075,6 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len) } } -static void brcmf_bus_detach(struct brcmf_pub *drvr) -{ - brcmf_dbg(TRACE, "Enter\n"); - - if (drvr) { - /* Stop the bus module */ - brcmf_bus_stop(drvr->bus_if); - } -} - void brcmf_dev_reset(struct device *dev) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -1131,7 +1121,7 @@ void brcmf_detach(struct device *dev) brcmf_fws_deinit(drvr); - brcmf_bus_detach(drvr); + brcmf_bus_stop(drvr->bus_if); brcmf_proto_detach(drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index c94dcab260d0..de3197be5491 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -216,7 +216,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); -void brcmf_c_set_joinpref_default(struct brcmf_if *ifp); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 425c41dc0a59..aee6e5937c41 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -23,14 +23,17 @@ #include "common.h" #include "of.h" -void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) +void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) { + struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; struct device_node *np = dev->of_node; int irq; u32 irqf; u32 val; - if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) + if (!np || bus_type != BRCMF_BUSTYPE_SDIO || + !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index a9d94c15d0f5..95b7032d54b1 100644 ---
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h @@ -14,9 +14,11 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef CONFIG_OF -void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio); +void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings); #else -static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) +static void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) { } #endif /* CONFIG_OF */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 048027f2085b..6fae4cf3f6ab 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -601,7 +601,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo) { u32 config; - brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); /* BAR1 window may not be sized properly */ brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); @@ -1572,7 +1571,7 @@ static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo) if (ret) { brcmf_err("brcmf_attach failed\n"); } else { - ret = brcmf_bus_start(&devinfo->pdev->dev); + ret = brcmf_bus_started(&devinfo->pdev->dev); if (ret) brcmf_err("dongle is not responding\n"); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index dfb0658713d9..c5744b45ec8f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1661,7 +1661,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) pfirst->len, pfirst->next, pfirst->prev); skb_unlink(pfirst, &bus->glom); - if (brcmf_sdio_fromevntchan(pfirst->data)) + if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN])) brcmf_rx_event(bus->sdiodev->dev, pfirst); else brcmf_rx_frame(bus->sdiodev->dev, pfirst, @@ -4065,7 +4065,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, sdio_release_host(sdiodev->func[1]); - err = brcmf_bus_start(dev); + err = brcmf_bus_started(dev); if (err != 0) { brcmf_err("dongle is not responding\n"); goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 2f978a39b58a..d93ebbdc7737 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1148,7 +1148,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; - ret = brcmf_bus_start(devinfo->dev); + ret = brcmf_bus_started(devinfo->dev); if (ret) goto fail; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 466912eb2d87..e8e65115feba 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3469,7 +3469,7 @@ static struct attribute_group il3945_attribute_group = { .attrs = il3945_sysfs_entries, }; -static struct ieee80211_ops il3945_mac_ops __read_mostly = { +static struct ieee80211_ops il3945_mac_ops __ro_after_init = { .tx = il3945_mac_tx, .start = il3945_mac_start, .stop = il3945_mac_stop, @@ -3627,15 +3627,6 @@ il3945_pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) il->cmd_queue = IL39_CMD_QUEUE_NUM; - /* - * Disabling hardware scan means that mac80211 will perform scans - * "the hard way", rather than using device's scan. - */ - if (il3945_mod_params.disable_hw_scan) { - D_INFO("Disabling hw_scan\n"); - il3945_mac_ops.hw_scan = NULL; - } - D_INFO("*** LOAD DRIVER ***\n"); il->cfg = cfg; il->ops = &il3945_ops; @@ -3913,6 +3904,15 @@ il3945_init(void) pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); pr_info(DRV_COPYRIGHT "\n"); + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (il3945_mod_params.disable_hw_scan) { + pr_info("hw_scan is disabled\n"); + il3945_mac_ops.hw_scan = NULL; + } + ret = il3945_rate_control_register(); if (ret) { pr_err("Unable to register rate control algorithm: %d\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index affe760c8c22..376c79337a0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, { struct iwl_priv *priv = file->private_data; bool restart_fw = iwlwifi_mod_params.restart_fw; - int ret; + int __maybe_unused ret; iwlwifi_mod_params.restart_fw = true; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 8c0719468d00..2a04d0cd71ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -163,7 +163,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, REGULATORY_DISABLE_BEACON_HINTS; #ifdef CONFIG_PM_SLEEP - if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec && priv->trans->ops->d3_suspend && priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index b95c2d76db33..710dbbefd551 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -364,7 +364,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv, /* get the traffic load value for tid */ -static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) { u32 curr_time = jiffies_to_msecs(jiffies); u32 time_diff; @@ -372,14 +372,14 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) struct iwl_traffic_load *tl = NULL; if (tid >= IWL_MAX_TID_COUNT) - return 0; + return; tl = &(lq_data->load[tid]); curr_time -= curr_time % TID_ROUND_VALUE; if (!(tl->queue_count)) - return 0; + return; time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); index = time_diff / TID_QUEUE_CELL_SPACING; @@ -388,8 +388,6 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) /* TID_MAX_TIME_DIFF */ if (index >= TID_QUEUE_MAX_SIZE) rs_tl_rm_old_stats(tl, curr_time); - - return tl->total; } static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, @@ -397,7 +395,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, struct ieee80211_sta *sta) { int ret = -EAGAIN; - u32 load; /* * Don't create TX aggregation sessions when in high @@ -410,7 +407,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, return ret; } - load = rs_tl_get_load(lq_data, tid); + rs_tl_get_load(lq_data, tid); IWL_DEBUG_HT(priv, "Starting Tx 
agg: STA: %pM tid: %d\n", sta->addr, tid); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index c7509c51e9d9..d6013bfe991c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -407,7 +407,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv) lockdep_assert_held(&priv->mutex); /* No init ucode required? Curious, but maybe ok */ - if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) + if (!priv->fw->img[IWL_UCODE_INIT].num_sec) return 0; iwl_init_notification_wait(&priv->notif_wait, &calib_wait, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index 0b9f6a7bc834..39335b7b0c16 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -371,4 +371,4 @@ const struct iwl_cfg iwl6000_3agn_cfg = { MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index d4b73dedf89b..a72e58623d3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 26 -#define IWL3168_UCODE_API_MAX 26 +#define IWL7265D_UCODE_API_MAX 28 +#define IWL3168_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 8d3e53fac1da..b7953bf55f6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 26 -#define IWL8265_UCODE_API_MAX 26 +#define IWL8000_UCODE_API_MAX 28 +#define IWL8265_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index ff850410d897..a5f0c0bf85ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 26 +#define IWL9000_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c index ea1618525878..82f18d967c40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL_A000_UCODE_API_MAX 26 +#define IWL_A000_UCODE_API_MAX 28 /* Lowest firmware API version supported */ #define IWL_A000_UCODE_API_MIN 24 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 45b2f679e4d8..d22821501676 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -166,8 +166,9 @@ static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
 {
 	int i;
-	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
+	for (i = 0; i < img->num_sec; i++)
 		iwl_free_fw_desc(drv, &img->sec[i]);
+	kfree(img->sec);
 }
 
 static void iwl_dealloc_ucode(struct iwl_drv *drv)
@@ -179,8 +180,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 		kfree(drv->fw.dbg_conf_tlv[i]);
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
 		kfree(drv->fw.dbg_trigger_tlv[i]);
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
-		kfree(drv->fw.dbg_mem_tlv[i]);
+	kfree(drv->fw.dbg_mem_tlv);
 
 	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
 		iwl_free_fw_img(drv, drv->fw.img + i);
@@ -241,7 +241,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 }
 
 struct fw_img_parsing {
-	struct fw_sec sec[IWL_UCODE_SECTION_MAX];
+	struct fw_sec *sec;
 	int sec_counter;
 };
 
@@ -276,7 +276,8 @@ struct iwl_firmware_pieces {
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_dbg_mem_tlv;
 };
 
 /*
@@ -290,11 +291,33 @@ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
 	return &pieces->img[type].sec[sec];
 }
 
+static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type,
+			   int sec)
+{
+	struct fw_img_parsing *img = &pieces->img[type];
+	struct fw_sec *sec_memory;
+	int size = sec + 1;
+	size_t alloc_size = sizeof(*img->sec) * size;
+
+	if (img->sec && img->sec_counter >= size)
+		return;
+
+	sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
+	if (!sec_memory)
+		return;
+
+	img->sec = sec_memory;
+	img->sec_counter = size;
+}
+
 static void set_sec_data(struct iwl_firmware_pieces *pieces,
 			 enum iwl_ucode_type type,
 			 int sec,
 			 const void *data)
 {
+	alloc_sec_data(pieces, type, sec);
+
 	pieces->img[type].sec[sec].data = data;
 }
 
@@ -303,6 +326,8 @@ static void set_sec_size(struct iwl_firmware_pieces *pieces,
 			 int sec,
 			 size_t size)
 {
+	alloc_sec_data(pieces, type, sec);
+
 	pieces->img[type].sec[sec].size = size;
 }
 
@@ -318,6 +343,8 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
 			   int sec,
 			   u32 offset)
 {
+	alloc_sec_data(pieces, type, sec);
+
 	pieces->img[type].sec[sec].offset = offset;
 }
 
@@ -383,6 +410,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 	struct fw_img_parsing *img;
 	struct fw_sec *sec;
 	struct fw_sec_parsing *sec_parse;
+	size_t alloc_size;
 
 	if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
 		return -1;
@@ -390,6 +418,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
 	sec_parse = (struct fw_sec_parsing *)data;
 
 	img = &pieces->img[type];
+
+	alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
+	sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	img->sec = sec;
+
 	sec = &img->sec[img->sec_counter];
 
 	sec->offset = le32_to_cpu(sec_parse->offset);
@@ -1009,31 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
 				(void *)tlv_data;
 			u32 type;
+			size_t size;
+			struct iwl_fw_dbg_mem_seg_tlv *n;
 
 			if (tlv_len != (sizeof(*dbg_mem)))
 				goto invalid_tlv_len;
 
 			type = le32_to_cpu(dbg_mem->data_type);
-			drv->fw.dbg_dynamic_mem = true;
 
-			if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
-				IWL_ERR(drv,
-					"Skip unknown dbg mem segment: %u\n",
-					dbg_mem->data_type);
-				break;
-			}
+			IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+				       dbg_mem->data_type);
 
-			if (pieces->dbg_mem_tlv[type]) {
-				IWL_ERR(drv,
-					"Ignore duplicate mem segment: %u\n",
-					dbg_mem->data_type);
+			switch (type & FW_DBG_MEM_TYPE_MASK) {
+			case FW_DBG_MEM_TYPE_REGULAR:
+			case FW_DBG_MEM_TYPE_PRPH:
+				/* we know how to handle these */
 				break;
+			default:
+				IWL_ERR(drv,
+					"Found debug memory segment with invalid type: 0x%x\n",
+					type);
+				return -EINVAL;
 			}
 
-			IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
-				       dbg_mem->data_type);
-
-			pieces->dbg_mem_tlv[type] = dbg_mem;
+			size = sizeof(*pieces->dbg_mem_tlv) *
+			       (pieces->n_dbg_mem_tlv + 1);
+			n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
+			if (!n)
+				return -ENOMEM;
+			pieces->dbg_mem_tlv = n;
+			pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
+			pieces->n_dbg_mem_tlv++;
 			break;
 			}
 		default:
@@ -1083,12 +1124,18 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
 			   enum iwl_ucode_type type)
 {
 	int i;
-	for (i = 0;
-	     i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
-	     i++)
-		if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-				      get_sec(pieces, type, i)))
+	struct fw_desc *sec;
+
+	sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	drv->fw.img[type].sec = sec;
+	drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
+
+	for (i = 0; i < pieces->img[type].sec_counter; i++)
+		if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
 			return -ENOMEM;
+
 	return 0;
 }
 
@@ -1345,19 +1392,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
-		if (pieces->dbg_mem_tlv[i]) {
-			drv->fw.dbg_mem_tlv[i] =
-				kmemdup(pieces->dbg_mem_tlv[i],
-					sizeof(*drv->fw.dbg_mem_tlv[i]),
-					GFP_KERNEL);
-			if (!drv->fw.dbg_mem_tlv[i])
-				goto out_free_fw;
-		}
-	}
-
 	/* Now that we can no longer fail, copy information */
 
+	drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+	pieces->dbg_mem_tlv = NULL;
+	drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+
 	/*
 	 * The (size - 16) / 12 formula is based on the information recorded
 	 * for each event, which is of mode 1 (including timestamp) for all
@@ -1441,25 +1481,27 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 				op->name, err);
 #endif
 	}
-	kfree(pieces);
-	return;
+	goto free;
 
  try_again:
 	/* try next, if any */
 	release_firmware(ucode_raw);
 	if (iwl_request_firmware(drv, false))
 		goto out_unbind;
-	kfree(pieces);
-	return;
+	goto free;
 
  out_free_fw:
 	IWL_ERR(drv, "failed to allocate pci memory\n");
 	iwl_dealloc_ucode(drv);
 	release_firmware(ucode_raw);
  out_unbind:
-	kfree(pieces);
 	complete(&drv->request_firmware_complete);
 	device_release_driver(drv->trans->dev);
+ free:
+	for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+		kfree(pieces->img[i].sec);
+	kfree(pieces->dbg_mem_tlv);
+	kfree(pieces);
 }
 
 struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
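
Note: the iwl-drv.c hunks above replace fixed IWL_UCODE_SECTION_MAX arrays with buffers grown on demand via krealloc(). A minimal sketch of that idiom follows; it is an illustration under assumed names (sec_desc, sec_list and sec_list_push are not part of iwlwifi), not the driver's actual code:

	#include <linux/slab.h>		/* krealloc(), kfree(), GFP_KERNEL */
	#include <linux/types.h>

	struct sec_desc {			/* illustrative element type */
		const void *data;
		u32 size;
	};

	struct sec_list {
		struct sec_desc *sec;		/* krealloc()-grown array */
		int num;			/* valid elements */
	};

	static int sec_list_push(struct sec_list *list, const struct sec_desc *item)
	{
		struct sec_desc *tmp;

		/* krealloc(NULL, ...) acts like kmalloc(), so the first push also
		 * works; on failure the old buffer stays allocated and valid,
		 * which is why the result must not be assigned to list->sec
		 * before the NULL check.
		 */
		tmp = krealloc(list->sec, sizeof(*tmp) * (list->num + 1), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		list->sec = tmp;
		list->sec[list->num++] = *item;
		return 0;
	}

This mirrors what iwl_store_ucode_sec() above does: grow by one element per parsed section, keep the old pointer on allocation failure, and free the whole array with a single kfree() (as iwl_free_fw_img() now does).
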
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 84813b550ef1..d01701ee4777 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -379,7 +379,6 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
*/ -#define IWL_UCODE_SECTION_MAX 16 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC #define PAGING_SEPARATOR_SECTION 0xAAAABBBB @@ -489,25 +488,22 @@ enum iwl_fw_dbg_monitor_mode { }; /** - * enum iwl_fw_mem_seg_type - data types for dumping on error - * - * @FW_DBG_MEM_SMEM: the data type is SMEM - * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC - * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC + * enum iwl_fw_mem_seg_type - memory segment type + * @FW_DBG_MEM_TYPE_MASK: mask for the type indication + * @FW_DBG_MEM_TYPE_REGULAR: regular memory + * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading) */ -enum iwl_fw_dbg_mem_seg_type { - FW_DBG_MEM_DCCM_LMAC = 0, - FW_DBG_MEM_DCCM_UMAC, - FW_DBG_MEM_SMEM, - - /* Must be last */ - FW_DBG_MEM_MAX, +enum iwl_fw_mem_seg_type { + FW_DBG_MEM_TYPE_MASK = 0xff000000, + FW_DBG_MEM_TYPE_REGULAR = 0x00000000, + FW_DBG_MEM_TYPE_PRPH = 0x01000000, }; /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * - * @data_type: enum %iwl_fw_mem_seg_type + * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type + * for what we care about * @ofs: the memory segment offset * @len: the memory segment length, in bytes * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 5f229556339a..d323b70b510a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -132,7 +132,8 @@ struct fw_desc { }; struct fw_img { - struct fw_desc sec[IWL_UCODE_SECTION_MAX]; + struct fw_desc *sec; + int num_sec; bool is_dual_cpus; u32 paging_mem_size; }; @@ -295,8 +296,8 @@ struct iwl_fw { struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; - bool dbg_dynamic_mem; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; + size_t n_dbg_mem_tlv; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; struct iwl_gscan_capabilities gscan_capa; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b88e2048ae0b..207d8ae1e116 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - if (mvm->restart_fw > 0) { - mvm->restart_fw--; - ieee80211_restart_hw(mvm->hw); - } iwl_mvm_free_nd(mvm); + + if (!unified_image) { + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + if (mvm->restart_fw > 0) { + mvm->restart_fw--; + ieee80211_restart_hw(mvm->hw); + } + } } out_noreset: mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 7b7d2a146e30..a260cd503200 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { - int ret; + int __maybe_unused ret; mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index ae12badc0c2a..567597c26115 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -2075,7 +2075,7 @@ struct iwl_mu_group_mgmt_notif { * @system_time: system time on air rise * @tsf: TSF on air rise * @beacon_timestamp: beacon on air rise - * @phy_flags: general phy flags: band, modulation, etc. + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count @@ -2084,12 +2084,12 @@ struct iwl_stored_beacon_notif { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; - __le16 phy_flags; + __le16 band; __le16 channel; __le32 rates; __le32 byte_count; u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ #define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 2e8e3e8e30a3..e7b3b712d778 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -406,46 +406,63 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; -static u32 iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) +static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + u32 i; + + for (i = 0; i < len_bytes; i += 4) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); +} + +static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + unsigned long flags; + bool success = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + success = true; + _iwl_read_prph_block(trans, start, len_bytes, data); + iwl_trans_release_nic_access(trans, &flags); + } + + return success; +} + +static void iwl_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) { struct iwl_fw_error_dump_prph *prph; unsigned long flags; - u32 prph_len = 0, i; + u32 i; if (!iwl_trans_grab_nic_access(trans, &flags)) - return 0; + return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; - int reg; - __le32 *val; - - prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - val = (void *)prph->data; - for (reg = iwl_prph_dump_addr[i].start; - reg <= iwl_prph_dump_addr[i].end; - reg += 4) - *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans, - reg)); + _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, + /* our range is inclusive, hence + 4 */ + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4, + (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans, &flags); - - return prph_len; } /* @@ -495,11 +512,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_mvm_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem = - mvm->fw->dbg_mem_tlv; + const struct 
iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len; + u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; + u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len; bool monitor_dump_only = false; int i; @@ -624,10 +640,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for MEM segments */ - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i]->len); + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); } /* Make room for fw's virtual image pages, if it exists */ @@ -656,7 +671,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; - if (!mvm->fw->dbg_dynamic_mem) + if (!mvm->fw->n_dbg_mem_tlv) file_len += sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); @@ -708,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - if (!mvm->fw->dbg_dynamic_mem) { + if (!mvm->fw->n_dbg_mem_tlv) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -719,22 +734,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data = iwl_fw_error_next_data(dump_data); } - for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { - if (fw_dbg_mem[i]) { - u32 len = le32_to_cpu(fw_dbg_mem[i]->len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs); - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(len + - sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = fw_dbg_mem[i]->data_type; - dump_mem->offset = cpu_to_le32(ofs); + for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + bool success; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i].data_type; + dump_mem->offset = cpu_to_le32(ofs); + + switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { + case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): iwl_trans_read_mem_bytes(mvm->trans, ofs, dump_mem->data, len); - dump_data = iwl_fw_error_next_data(dump_data); + success = true; + break; + case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): + success = iwl_read_prph_block(mvm->trans, ofs, len, + (void *)dump_mem->data); + break; + default: + /* + * shouldn't get here, we ignored this kind + * of TLV earlier during the TLV parsing?! 
+			 */
+			WARN_ON(1);
+			success = false;
 		}
+
+		if (success)
+			dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
 	if (smem_len) {
@@ -816,11 +848,12 @@ dump_trans_data:
 			     sg_nents(sg_dump_data),
 			     fw_error_dump->op_mode_ptr,
 			     fw_error_dump->op_mode_len, 0);
-	sg_pcopy_from_buffer(sg_dump_data,
-			     sg_nents(sg_dump_data),
-			     fw_error_dump->trans_ptr->data,
-			     fw_error_dump->trans_ptr->len,
-			     fw_error_dump->op_mode_len);
+	if (fw_error_dump->trans_ptr)
+		sg_pcopy_from_buffer(sg_dump_data,
+				     sg_nents(sg_dump_data),
+				     fw_error_dump->trans_ptr->data,
+				     fw_error_dump->trans_ptr->len,
+				     fw_error_dump->op_mode_len);
 
 	dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, GFP_KERNEL);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 872066317fa5..b278e44e97ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 	 * CPU2 paging CSS
 	 * CPU2 paging image (including instruction and data)
 	 */
-	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
 		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
 			sec_idx++;
 			break;
@@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 	 * If paging is enabled there should be at least 2 more sections left
 	 * (one for CSS and one for Paging data)
 	 */
-	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+	if (sec_idx >= image->num_sec - 1) {
 		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 		iwl_free_fw_paging(mvm);
 		return -EINVAL;
@@ -259,9 +259,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 {
 	struct page *block;
 	dma_addr_t phys = 0;
-	int blk_idx = 0;
-	int order, num_of_pages;
-	int dma_enabled;
+	int blk_idx, order, num_of_pages, size, dma_enabled;
 
 	if (mvm->fw_paging_db[0].fw_paging_block)
 		return 0;
@@ -272,9 +270,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 
 	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
-	mvm->num_of_paging_blk = ((num_of_pages - 1) /
-				    NUM_OF_PAGE_PER_GROUP) + 1;
-
+	mvm->num_of_paging_blk =
+		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
 	mvm->num_of_pages_in_last_blk =
 		num_of_pages -
 		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
@@ -284,46 +281,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 		     mvm->num_of_paging_blk,
 		     mvm->num_of_pages_in_last_blk);
 
-	/* allocate block of 4Kbytes for paging CSS */
-	order = get_order(FW_PAGING_SIZE);
-	block = alloc_pages(GFP_KERNEL, order);
-	if (!block) {
-		/* free all the previous pages since we failed */
-		iwl_free_fw_paging(mvm);
-		return -ENOMEM;
-	}
-
-	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
-
-	if (dma_enabled) {
-		phys = dma_map_page(mvm->trans->dev, block, 0,
-				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(mvm->trans->dev, phys)) {
-			/*
-			 * free the previous pages and the current one since
-			 * we failed to map_page.
-			 */
-			iwl_free_fw_paging(mvm);
-			return -ENOMEM;
-		}
-		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
-	} else {
-		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
-			blk_idx << BLOCK_2_EXP_SIZE;
-	}
-
-	IWL_DEBUG_FW(mvm,
-		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
-		     order);
-
 	/*
-	 * allocate blocks in dram.
-	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
+	 * Allocate CSS and paging blocks in dram.
 	 */
-	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
-		/* allocate block of PAGING_BLOCK_SIZE (32K) */
-		order = get_order(PAGING_BLOCK_SIZE);
+	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+		order = get_order(size);
 		block = alloc_pages(GFP_KERNEL, order);
 		if (!block) {
 			/* free all the previous pages since we failed */
@@ -332,7 +296,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 		}
 
 		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+		mvm->fw_paging_db[blk_idx].fw_paging_size = size;
 
 		if (dma_enabled) {
 			phys = dma_map_page(mvm->trans->dev, block, 0,
@@ -353,9 +317,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 				blk_idx << BLOCK_2_EXP_SIZE;
 		}
 
-		IWL_DEBUG_FW(mvm,
-			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
-			     order);
+		if (!blk_idx)
+			IWL_DEBUG_FW(mvm,
+				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+				     order);
+		else
+			IWL_DEBUG_FW(mvm,
+				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+				     order);
 	}
 
 	return 0;
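
Note: two small patterns in the mvm/fw.c hunks above are worth spelling out. The open-coded ceiling division is replaced by DIV_ROUND_UP(), and the formerly separate CSS allocation is folded into the main loop by deriving the block size from blk_idx. A self-contained sketch, with illustrative names that are not part of the driver:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* For any n > 0 these are equivalent; the macro states the intent:
	 *   ((n - 1) / per_group) + 1  ==  DIV_ROUND_UP(n, per_group)
	 */
	static unsigned int blocks_needed(unsigned int n, unsigned int per_group)
	{
		return DIV_ROUND_UP(n, per_group);
	}

	/* Folding a special first iteration into the loop, as the CSS block is
	 * folded above: index 0 gets the small size, all others the big one.
	 */
	static unsigned long block_size(int blk_idx, unsigned long css_size,
					unsigned long paging_size)
	{
		return blk_idx ? paging_size : css_size;
	}

One caveat the caller has to respect: for n == 0 the open-coded form underflows an unsigned counter, while DIV_ROUND_UP(0, per_group) is 0; the paging code above only allocates when paging memory actually exists, so both forms agree on the values it can see.
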
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 4a0874e40731..ebf6c071eb36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1565,7 +1565,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
 	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
 	rx_status.band =
-		(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+		(sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 45122dafe922..71f9aa9f7c7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; + /* this is the case for CCK frames, it's better (only 8) for OFDM */ + hw->radiotap_timestamp.accuracy = 22; + hw->rate_control_algorithm = "iwl-mvm-rs"; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; @@ -670,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->wowlan = &mvm->wowlan; } - if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4a9cb76b7611..a672aa71c656 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1657,8 +1657,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, * Disable a TXQ. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. */ -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags); +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); /* Return a bitmask with all the hw supported queues, except for the diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 227c5ed9cbe6..80f99c365b6a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct iwl_mvm_sta *mvmsta; - struct iwl_mvm_vif *mvmvif; - if (!sta->ht_cap.ht_supported) return false; @@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) return false; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (mvm->nvm_data->sku_cap_mimo_disabled) return false; @@ -3071,7 +3065,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) { - u8 nss = 0, mcs = 0; + u8 nss = 0; spin_lock(&mvm->drv_stats_lock); @@ -3099,11 +3093,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) if (rate & RATE_MCS_HT_MSK) { mvm->drv_rx_stats.ht_frames++; - mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; } else if (rate & RATE_MCS_VHT_MSK) { mvm->drv_rx_stats.vht_frames++; - mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; } else { diff 
--git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0e60e38b2acf..e16687d5afaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -621,12 +621,10 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, }; int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) : sizeof(struct iwl_notif_statistics_v10); - u32 temperature; if (iwl_rx_packet_payload_len(pkt) != expected_size) goto invalid; - temperature = le32_to_cpu(stats->general.radio_temperature); data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.beacon_filter_average_energy; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 09e9e2e3ed04..19fd55c66705 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -454,13 +454,6 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) rcu_read_unlock(); - spin_lock_bh(&mvm->queue_info_lock); - /* Unmap MAC queues and TIDs from this queue */ - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; - spin_unlock_bh(&mvm->queue_info_lock); - return disable_agg_tids; } @@ -755,28 +748,22 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * first */ if (using_inactive_queue) { - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_DISABLE_QUEUE, - }; - u8 txq_curr_ac; - - disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); + u8 txq_curr_ac, sta_id; spin_lock_bh(&mvm->queue_info_lock); txq_curr_ac = mvm->queue_info[queue].mac80211_ac; - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac]; - cmd.tid = mvm->queue_info[queue].txq_tid; + sta_id = mvm->queue_info[queue].ra_sta_id; spin_unlock_bh(&mvm->queue_info_lock); + disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), - &cmd); + + ret = iwl_mvm_disable_txq(mvm, queue, + mvmsta->vif->hw_queue[txq_curr_ac], + tid, 0); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -791,7 +778,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } /* If TXQ is allocated to another STA, update removal in FW */ - if (cmd.sta_id != mvmsta->sta_id) + if (sta_id != mvmsta->sta_id) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); } @@ -868,7 +855,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; - s8 sta_id; int tid; unsigned long tid_bitmap; int ret; @@ -876,7 +862,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->queue_info_lock); - sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; spin_unlock_bh(&mvm->queue_info_lock); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 66957ac12ca4..2b2db38eee3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 
*addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd) +static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info) { + u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); - u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist); u8 protocol = 0; /* @@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, * compute it */ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) - return; + goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || @@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); - return; + goto out; } if (skb->protocol == htons(ETH_P_IP)) { @@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); - return; + goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); @@ -159,7 +158,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); - return; + goto out; } /* enable L4 csum */ @@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; - tx_cmd->offload_assist = cpu_to_le16(offload_assist); +out: #endif + return offload_assist; } /* @@ -295,7 +295,52 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); - iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd); + tx_cmd->offload_assist |= + cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info)); +} + +static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta) +{ + int rate_idx; + u8 rate_plcp; + u32 rate_flags; + + /* HT rate doesn't make sense for a non data frame */ + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, + "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n", + info->control.rates[0].flags, + info->control.rates[0].idx); + + rate_idx = info->control.rates[0].idx; + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) + rate_idx = rate_lowest_index( + &mvm->nvm_data->bands[info->band], sta); + + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == NL80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); + + if (info->band == NL80211_BAND_2GHZ && + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; + else + rate_flags = + BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; + + /* Set CCK flag as needed */ + if ((rate_idx >= 
IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + return (u32)rate_plcp | rate_flags; } /* @@ -305,10 +350,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { - u32 rate_flags; - int rate_idx; - u8 rate_plcp; - /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; @@ -337,46 +378,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } - /* HT rate doesn't make sense for a non data frame */ - WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, - "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n", - info->control.rates[0].flags, - info->control.rates[0].idx, - le16_to_cpu(fc)); - - rate_idx = info->control.rates[0].idx; - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == NL80211_BAND_5GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); - mvm->mgmt_last_antenna_idx = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), mvm->mgmt_last_antenna_idx); - if (info->band == NL80211_BAND_2GHZ && - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; - else - rate_flags = - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; - - /* Set CCK flag as needed */ - if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) - rate_flags |= RATE_MCS_CCK_MSK; - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); + tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d04babd99b53..26b853ef195f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -693,10 +693,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .tid = cfg->tid, }; - /* Set sta_id in the command, if it exists */ - if (iwl_mvm_is_dqa_supported(mvm)) - cmd.sta_id = cfg->sta_id; - iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), @@ -706,8 +702,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } } -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) +int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, @@ -720,7 +716,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { spin_unlock_bh(&mvm->queue_info_lock); - return; + return 0; } mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); @@ -760,7 +756,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* If 
the queue is still enabled - nothing left to do in this func */
 	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
 		spin_unlock_bh(&mvm->queue_info_lock);
-		return;
+		return 0;
 	}
 
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -791,6 +787,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	if (ret)
 		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
 			queue, ret);
+
+	return ret;
 }
 
 /**
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b10e3633df1a..c1d99d15796d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 		(*first_ucode_section)++;
 	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
@@ -868,19 +868,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 				      int cpu,
 				      int *first_ucode_section)
 {
-	int shift_param;
 	int i, ret = 0;
 	u32 last_read_idx = 0;
 
-	if (cpu == 1) {
-		shift_param = 0;
+	if (cpu == 1)
 		*first_ucode_section = 0;
-	} else {
-		shift_param = 16;
+	else
 		(*first_ucode_section)++;
-	}
 
-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
 		last_read_idx = i;
 
 		/*
@@ -1066,6 +1062,20 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 					       &first_ucode_section);
 }
 
+static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+{
+	bool hw_rfkill = iwl_is_rfkill_set(trans);
+
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans->status);
+
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+	return hw_rfkill;
+}
+
 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1208,12 +1218,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	mutex_lock(&trans_pcie->mutex);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill) {
 		ret = -ERFKILL;
 		goto out;
@@ -1261,13 +1266,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	ret = iwl_pcie_load_given_ucode(trans, fw);
 
 	/* re-check RF-Kill state since we may have missed the interrupt */
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
 
 	if (hw_rfkill && !run_in_rfkill)
 		ret = -ERFKILL;
@@ -1659,7 +1658,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill;
 	int err;
 
 	lockdep_assert_held(&trans_pcie->mutex);
@@ -1683,13 +1681,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 	/* Set is_down to false here so that...*/
 	trans_pcie->is_down = false;
 
-	hw_rfkill = iwl_is_rfkill_set(trans);
-	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans->status);
-	else
-		clear_bit(STATUS_RFKILL, &trans->status);
-	/* ... rfkill can call stop_device and set it false if needed */
-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	/* ...rfkill can call stop_device and set it false if needed */
+	iwl_trans_check_hw_rf_kill(trans);
 
 	/* Make sure we sync here, because we'll need full access later */
 	if (low_power)
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 7ff2efadceca..3f97acb57e66 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -2086,7 +2086,7 @@ static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
  * Initialization
  */
 
-static struct cfg80211_ops lbs_cfg80211_ops = {
+static const struct cfg80211_ops lbs_cfg80211_ops = {
 	.set_monitor_channel = lbs_cfg_set_monitor_channel,
 	.libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
 	.scan = lbs_cfg_scan,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 301170cccfff..033ff881c751 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -305,7 +305,7 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 	}
 
 	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return 0;
+	return ret;
 }
 
 static int lbs_wait_for_ds_awake(struct lbs_private *priv)
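
Note: the libertas/cmd.c one-liner above fixes a classic bug class — an error code is computed, even logged, and then discarded by a hard-coded return 0. In isolation (issue_command() is a hypothetical stand-in, not a libertas function):

	#include <linux/printk.h>

	int issue_command(void);	/* hypothetical; may return a negative errno */

	/* Before: failure is logged but silently swallowed. */
	static int do_thing_buggy(void)
	{
		int ret = issue_command();

		pr_debug("ret %d\n", ret);
		return 0;		/* bug: the caller never sees the failure */
	}

	/* After, as in lbs_cmd_802_11_sleep_params() above: */
	static int do_thing_fixed(void)
	{
		int ret = issue_command();

		pr_debug("ret %d\n", ret);
		return ret;
	}

The adjacent cfg.c change (making lbs_cfg80211_ops const) is the usual hardening of an ops table that is not written to after initialization, and the pcie/trans.c hunks above it hoist three identical rfkill-sync sequences into the single iwl_trans_check_hw_rf_kill() helper shown in the diff.
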
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index c47d6366875d..a75013ac84d7 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
 {
 	struct txpd *local_tx_pd;
 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
-	unsigned int pad;
-	int headroom = (priv->adapter->iface_type ==
-			MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
-	pad = ((void *)skb->data - sizeof(*local_tx_pd) -
-	       headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
-	skb_push(skb, pad);
 
 	skb_push(skb, sizeof(*local_tx_pd));
 
@@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
 	local_tx_pd->bss_num = priv->bss_num;
 	local_tx_pd->bss_type = priv->bss_type;
 	/* Always zero as the data is followed by struct txpd */
-	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
-						 pad);
+	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
 	local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
 	local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
-						 sizeof(*local_tx_pd) -
-						 pad);
+						 sizeof(*local_tx_pd));
 
 	if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
 		local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 				       ra_list_flags);
 		return -1;
 	}
-	skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+	/* skb_aggr->data already 64 byte align, just reserve bus interface
+	 * header and txpd.
+ */ + skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 145cc4b5103b..1e3bd435a694 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) ie_len = ie_buf[1] + sizeof(struct ieee_types_header); band = mwifiex_band_to_radio_type(priv->curr_bss_params.band); - chan = __ieee80211_get_channel(priv->wdev.wiphy, + chan = ieee80211_get_channel(priv->wdev.wiphy, ieee80211_channel_to_frequency(bss_info.bss_chan, band)); diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index b9284b533294..ae2b69db5994 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -114,7 +114,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { p += sprintf(p, "multicast_count=\"%d\"\n", netdev_mc_count(netdev)); - p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); + p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len, + info.ssid.ssid); p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); p += sprintf(p, "country_code = \"%s\"\n", info.country_code); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index ea455948a68a..cb6a1a81d44e 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -434,14 +434,14 @@ enum mwifiex_channel_flags { #define HostCmd_ACT_BITWISE_SET 0x0002 #define HostCmd_ACT_BITWISE_CLR 0x0003 #define HostCmd_RESULT_OK 0x0000 - -#define HostCmd_ACT_MAC_RX_ON 0x0001 -#define HostCmd_ACT_MAC_TX_ON 0x0002 -#define HostCmd_ACT_MAC_WEP_ENABLE 0x0008 -#define HostCmd_ACT_MAC_ETHERNETII_ENABLE 0x0010 -#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 -#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 -#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON 0x2000 +#define HostCmd_ACT_MAC_RX_ON BIT(0) +#define HostCmd_ACT_MAC_TX_ON BIT(1) +#define HostCmd_ACT_MAC_WEP_ENABLE BIT(3) +#define HostCmd_ACT_MAC_ETHERNETII_ENABLE BIT(4) +#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE BIT(7) +#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE BIT(8) +#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON BIT(13) +#define HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE BIT(16) #define HostCmd_BSS_MODE_IBSS 0x0002 #define HostCmd_BSS_MODE_ANY 0x0003 @@ -550,6 +550,7 @@ enum mwifiex_channel_flags { #define EVENT_TX_DATA_PAUSE 0x00000055 #define EVENT_EXT_SCAN_REPORT 0x00000058 #define EVENT_RXBA_SYNC 0x00000059 +#define EVENT_UNKNOWN_DEBUG 0x00000063 #define EVENT_BG_SCAN_STOPPED 0x00000065 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f #define EVENT_MULTI_CHAN_INFO 0x0000006a @@ -1084,8 +1085,7 @@ struct host_cmd_ds_802_11_mac_address { }; struct host_cmd_ds_mac_control { - __le16 action; - __le16 reserved; + __le32 action; }; struct host_cmd_ds_mac_multicast_adr { diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index b36cb3fef358..756948385b60 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -92,7 +92,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv) 
for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++) memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key)); priv->wep_key_curr_index = 0; - priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON | + priv->curr_pkt_filter = HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE | + HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON | HostCmd_ACT_MAC_ETHERNETII_ENABLE; priv->beacon_period = 100; /* beacon interval */ @@ -408,8 +409,6 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) static void mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { - int idx; - if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; @@ -427,23 +426,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); mwifiex_free_cmd_buffer(adapter); - for (idx = 0; idx < adapter->num_mem_types; idx++) { - struct memory_type_mapping *entry = - &adapter->mem_type_mapping_tbl[idx]; - - if (entry->mem_ptr) { - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - } - entry->mem_size = 0; - } - - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } - if (adapter->sleep_cfm) dev_kfree_skb_any(adapter->sleep_cfm); } @@ -656,10 +638,9 @@ void mwifiex_free_priv(struct mwifiex_private *priv) * - Free the adapter * - Notify completion */ -int +void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) { - int ret = -EINPROGRESS; struct mwifiex_private *priv; s32 i; unsigned long flags; @@ -667,15 +648,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) /* mwifiex already shutdown */ if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) - return 0; - - adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING; - /* wait for mwifiex_process to complete */ - if (adapter->mwifiex_processing) { - mwifiex_dbg(adapter, WARN, - "main process is still running\n"); - return ret; - } + return; /* cancel current command */ if (adapter->curr_cmd) { @@ -726,11 +699,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) mwifiex_adapter_cleanup(adapter); spin_unlock(&adapter->mwifiex_lock); - - /* Notify completion */ - ret = mwifiex_shutdown_fw_complete(adapter); - - return ret; + adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index e5c3a8aa3929..9d80180a5519 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -248,15 +248,14 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) if (adapter->mwifiex_processing || adapter->main_locked) { adapter->more_task_flag = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); - goto exit_main_proc; + return 0; } else { adapter->mwifiex_processing = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); } process_start: do { - if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) || - (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)) + if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) break; /* For non-USB interfaces, If we process interrupts first, it @@ -464,9 +463,6 @@ process_start: adapter->mwifiex_processing = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); -exit_main_proc: - if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) - mwifiex_shutdown_drv(adapter); return ret; } EXPORT_SYMBOL_GPL(mwifiex_main_process); @@ -645,16 +641,14 @@ err_dnld_fw: if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); + 
adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); } - adapter->surprise_removed = true; - mwifiex_terminate_workqueue(adapter); + init_failed = true; done: if (adapter->cal_data) { @@ -1032,7 +1026,7 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter) } EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync); -void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) +int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info) { void *p; char drv_version[64]; @@ -1042,21 +1036,17 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) int i, idx; struct netdev_queue *txq; struct mwifiex_debug_info *debug_info; - - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } + void *drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); - adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); + /* memory allocate here should be free in mwifiex_upload_device_dump*/ + drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); - if (!adapter->drv_info_dump) - return; + if (!drv_info_dump) + return 0; - p = (char *)(adapter->drv_info_dump); + p = (char *)(drv_info_dump); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); mwifiex_drv_get_driver_version(adapter, drv_version, @@ -1140,18 +1130,20 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) kfree(debug_info); } - adapter->drv_info_size = p - adapter->drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); + *drv_info = drv_info_dump; + return p - drv_info_dump; } EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, + int drv_info_size) { u8 idx, *dump_data, *fw_dump_ptr; u32 dump_len; dump_len = (strlen("========Start dump driverinfo========\n") + - adapter->drv_info_size + + drv_info_size + strlen("\n========End dump========\n")); for (idx = 0; idx < adapter->num_mem_types; idx++) { @@ -1181,8 +1173,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) strcpy(fw_dump_ptr, "========Start dump driverinfo========\n"); fw_dump_ptr += strlen("========Start dump driverinfo========\n"); - memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size); - fw_dump_ptr += adapter->drv_info_size; + memcpy(fw_dump_ptr, drv_info, drv_info_size); + fw_dump_ptr += drv_info_size; strcpy(fw_dump_ptr, "\n========End dump========\n"); fw_dump_ptr += strlen("\n========End dump========\n"); @@ -1220,18 +1212,12 @@ done: struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; - if (entry->mem_ptr) { - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - } + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; entry->mem_size = 0; } - if (adapter->drv_info_dump) { - vfree(adapter->drv_info_dump); - adapter->drv_info_dump = NULL; - adapter->drv_info_size = 0; - } + vfree(drv_info); } EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); @@ -1362,7 +1348,7 @@ static void mwifiex_main_work_queue(struct work_struct *work) * This function gets called during PCIe function level reset. 
Required * code is extracted from mwifiex_remove_card() */ -static int +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; @@ -1399,11 +1385,8 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - adapter->init_wait_q_woken = false; - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); if (adapter->if_ops.down_dev) adapter->if_ops.down_dev(adapter); @@ -1434,24 +1417,18 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) exit_return: return 0; } +EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); /* This function gets called during PCIe function level reset. Required * code is extracted from mwifiex_add_card() */ -static int -mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done, - struct mwifiex_if_ops *if_ops, u8 iface_type) +int +mwifiex_reinit_sw(struct mwifiex_adapter *adapter) { - char fw_name[32]; - struct pcie_service_card *card = adapter->card; - mwifiex_init_lock_list(adapter); if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); - adapter->iface_type = iface_type; - adapter->fw_done = fw_done; - adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; adapter->surprise_removed = false; init_waitqueue_head(&adapter->init_wait_q); @@ -1488,18 +1465,12 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done, * mwifiex_register_dev() */ mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__); - strcpy(fw_name, adapter->fw_name); - strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME); - adapter->tx_buf_size = card->pcie.tx_buf_size; - adapter->ext_scan = card->pcie.can_ext_scan; if (mwifiex_init_hw_fw(adapter, false)) { - strcpy(adapter->fw_name, fw_name); mwifiex_dbg(adapter, ERROR, "%s: firmware init failed\n", __func__); goto err_init_fw; } - strcpy(adapter->fw_name, fw_name); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); complete_all(adapter->fw_done); @@ -1509,43 +1480,22 @@ err_init_fw: mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); + +err_kmalloc: + adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); } -err_kmalloc: - mwifiex_terminate_workqueue(adapter); - adapter->surprise_removed = true; complete_all(adapter->fw_done); mwifiex_dbg(adapter, INFO, "%s, error\n", __func__); return -1; } - -/* This function processes pre and post PCIe function level resets. - * It performs software cleanup without touching PCIe specific code. - * Also, during initialization PCIe stuff is skipped. 
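With mwifiex_do_flr() and its prepare flag gone, the PCIe reset_notify callback (in the pcie.c hunks further down) calls the two exported halves directly. A condensed sketch of the resulting flow:

        static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
        {
                struct pcie_service_card *card = pci_get_drvdata(pdev);
                struct mwifiex_adapter *adapter = card->adapter;

                if (prepare) {
                        /* Pre-FLR: tear down software state only; the
                         * kernel saves/restores PCIe context around FLR.
                         */
                        mwifiex_shutdown_sw(adapter);
                        adapter->surprise_removed = true;
                } else {
                        /* Post-FLR: rebuild state, re-download firmware. */
                        adapter->surprise_removed = false;
                        mwifiex_reinit_sw(adapter);
                }
        }
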
- */ -void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare) -{ - struct mwifiex_if_ops if_ops; - - if (!prepare) { - mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops, - adapter->iface_type); - } else { - memcpy(&if_ops, &adapter->if_ops, - sizeof(struct mwifiex_if_ops)); - mwifiex_shutdown_sw(adapter); - } -} -EXPORT_SYMBOL_GPL(mwifiex_do_flr); +EXPORT_SYMBOL_GPL(mwifiex_reinit_sw); static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv) { @@ -1681,17 +1631,13 @@ err_init_fw: pr_debug("info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); - if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { - pr_debug("info: %s: shutdown mwifiex\n", __func__); - adapter->init_wait_q_woken = false; - - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); - } err_registerdev: adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { + pr_debug("info: %s: shutdown mwifiex\n", __func__); + mwifiex_shutdown_drv(adapter); + } err_kmalloc: mwifiex_free_adapter(adapter); @@ -1741,11 +1687,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - adapter->init_wait_q_woken = false; - if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) - wait_event_interruptible(adapter->init_wait_q, - adapter->init_wait_q_woken); + mwifiex_shutdown_drv(adapter); mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); if (atomic_read(&adapter->rx_pending) || diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5c9bd944b6ea..5c8297207f33 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -248,7 +248,6 @@ enum MWIFIEX_HARDWARE_STATUS { MWIFIEX_HW_STATUS_INITIALIZING, MWIFIEX_HW_STATUS_INIT_DONE, MWIFIEX_HW_STATUS_RESET, - MWIFIEX_HW_STATUS_CLOSING, MWIFIEX_HW_STATUS_NOT_READY }; @@ -530,7 +529,7 @@ struct mwifiex_private { u8 tx_timeout_cnt; struct net_device *netdev; struct net_device_stats stats; - u16 curr_pkt_filter; + u32 curr_pkt_filter; u32 bss_mode; u32 pkt_tx_ctrl; u16 tx_power_level; @@ -995,8 +994,6 @@ struct mwifiex_adapter { u8 key_api_major_ver, key_api_minor_ver; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; - void *drv_info_dump; - u32 drv_info_size; bool scan_chan_gap_enabled; struct sk_buff_head rx_data_q; bool mfg_mode; @@ -1041,9 +1038,7 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); -int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter); - -int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter); +void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter); int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); @@ -1644,8 +1639,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); -void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); +int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, + int drv_info_size); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void 
mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, @@ -1670,5 +1666,6 @@ void mwifiex_debugfs_remove(void); void mwifiex_dev_debugfs_init(struct mwifiex_private *priv); void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv); #endif -void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare); +int mwifiex_reinit_sw(struct mwifiex_adapter *adapter); +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter); #endif /* !_MWIFIEX_MAIN_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 4db07da81d8d..a0d918094889 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -31,8 +31,6 @@ #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" -static u8 user_rmmod; - static struct mwifiex_if_ops pcie_ops; static const struct of_device_id mwifiex_pcie_of_match_table[] = { @@ -51,6 +49,8 @@ static int mwifiex_pcie_probe_of(struct device *dev) return 0; } +static void mwifiex_pcie_work(struct work_struct *work); + static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, size_t size, int flags) @@ -79,6 +79,42 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, } /* + * This function writes data into PCIE card register. + */ +static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) +{ + struct pcie_service_card *card = adapter->card; + + iowrite32(data, card->pci_mmap1 + reg); + + return 0; +} + +/* This function reads data from PCIE card register. + */ +static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) +{ + struct pcie_service_card *card = adapter->card; + + *data = ioread32(card->pci_mmap1 + reg); + if (*data == 0xffffffff) + return 0xffffffff; + + return 0; +} + +/* This function reads u8 data from PCIE card register. 
*/ +static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, + int reg, u8 *data) +{ + struct pcie_service_card *card = adapter->card; + + *data = ioread8(card->pci_mmap1 + reg); + + return 0; +} + +/* * This function reads sleep cookie and checks if FW is ready */ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) @@ -219,6 +255,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; card->pcie.num_mem_types = data->num_mem_types; card->pcie.can_ext_scan = data->can_ext_scan; + INIT_WORK(&card->work, mwifiex_pcie_work); } /* device tree node parsing and platform specific configuration*/ @@ -245,6 +282,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) struct pcie_service_card *card; struct mwifiex_adapter *adapter; struct mwifiex_private *priv; + const struct mwifiex_pcie_card_reg *reg; + u32 fw_status; + int ret; card = pci_get_drvdata(pdev); @@ -254,7 +294,15 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) if (!adapter || !adapter->priv_num) return; - if (user_rmmod && !adapter->mfg_mode) { + cancel_work_sync(&card->work); + + reg = card->pcie.reg; + if (reg) + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + else + fw_status = -1; + + if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -269,7 +317,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) static void mwifiex_pcie_shutdown(struct pci_dev *pdev) { - user_rmmod = 1; mwifiex_pcie_remove(pdev); return; @@ -330,7 +377,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) * Cleanup all software without cleaning anything related to * PCIe and HW. */ - mwifiex_do_flr(adapter, prepare); + mwifiex_shutdown_sw(adapter); adapter->surprise_removed = true; } else { /* Kernel stores and restores PCIe function context before and @@ -338,7 +385,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) * and firmware including firmware redownload */ adapter->surprise_removed = false; - mwifiex_do_flr(adapter, prepare); + mwifiex_reinit_sw(adapter); } mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } @@ -369,43 +416,6 @@ static struct pci_driver __refdata mwifiex_pcie = { }; /* - * This function writes data into PCIE card register. - */ -static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) -{ - struct pcie_service_card *card = adapter->card; - - iowrite32(data, card->pci_mmap1 + reg); - - return 0; -} - -/* - * This function reads data from PCIE card register. - */ -static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) -{ - struct pcie_service_card *card = adapter->card; - - *data = ioread32(card->pci_mmap1 + reg); - if (*data == 0xffffffff) - return 0xffffffff; - - return 0; -} - -/* This function reads u8 data from PCIE card register. */ -static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, - int reg, u8 *data) -{ - struct pcie_service_card *card = adapter->card; - - *data = ioread8(card->pci_mmap1 + reg); - - return 0; -} - -/* * This function adds delay loop to ensure FW is awake before proceeding. 
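Killing the module-wide user_rmmod flag means mwifiex_pcie_remove() (in the hunk above) must instead ask the hardware whether firmware is still running; a surprise-removed card reads back all-ones, so the same check also filters out ejected devices. Condensed:

        reg = card->pcie.reg;
        if (reg)
                ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
        else
                fw_status = -1;

        /* Only deauth and send FUNC_SHUTDOWN while firmware is alive. */
        if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode)
                mwifiex_deauthenticate_all(adapter);
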
*/ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter) @@ -429,16 +439,25 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, struct pcie_service_card *card = adapter->card; u8 *buffer; u32 sleep_cookie, count; + struct sk_buff *cmdrsp = card->cmdrsp_buf; for (count = 0; count < max_delay_loop_cnt; count++) { - buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN; - sleep_cookie = *(u32 *)buffer; + pci_dma_sync_single_for_cpu(card->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), + PCI_DMA_FROMDEVICE); + buffer = cmdrsp->data; + sleep_cookie = READ_ONCE(*(u32 *)buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { mwifiex_dbg(adapter, INFO, "sleep cookie found at count %d\n", count); break; } + pci_dma_sync_single_for_device(card->dev, + MWIFIEX_SKB_DMA_ADDR(cmdrsp), + sizeof(sleep_cookie), + PCI_DMA_FROMDEVICE); usleep_range(20, 30); } @@ -450,7 +469,6 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, /* This function wakes up the card by reading fw_status register. */ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { - u32 fw_status; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -460,10 +478,10 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); - /* Reading fw_status register will wakeup device */ - if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) { + /* Accessing fw_status register will wakeup device */ + if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { mwifiex_dbg(adapter, ERROR, - "Reading fw_status register failed\n"); + "Writing fw_status register failed\n"); return -1; } @@ -1681,7 +1699,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, CMD, "info: Rx CMD Response\n"); - mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); + if (adapter->curr_cmd) + mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); + else + pci_dma_sync_single_for_cpu(card->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_UPLD_SIZE, + PCI_DMA_FROMDEVICE); /* Unmap the command as a response has been received. 
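The sleep-cookie rework above is a DMA-coherence fix: the command-response buffer stays mapped while the host polls it, so the CPU's view has to be synced before each look and handed back to the device afterwards, and READ_ONCE() stops the compiler from caching the load across iterations. The polling loop, condensed:

        for (count = 0; count < max_delay_loop_cnt; count++) {
                pci_dma_sync_single_for_cpu(card->dev,
                                            MWIFIEX_SKB_DMA_ADDR(cmdrsp),
                                            sizeof(sleep_cookie),
                                            PCI_DMA_FROMDEVICE);
                sleep_cookie = READ_ONCE(*(u32 *)cmdrsp->data);
                if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE)
                        break;          /* firmware wrote the cookie */

                pci_dma_sync_single_for_device(card->dev,
                                               MWIFIEX_SKB_DMA_ADDR(cmdrsp),
                                               sizeof(sleep_cookie),
                                               PCI_DMA_FROMDEVICE);
                usleep_range(20, 30);
        }
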
*/ if (card->cmd_buf) { @@ -1694,10 +1718,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) rx_len = le16_to_cpu(pkt_len); skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, rx_len); - skb_pull(skb, INTF_HEADER_LEN); if (!adapter->curr_cmd) { if (adapter->ps_state == PS_STATE_SLEEP_CFM) { + pci_dma_sync_single_for_device(card->dev, + MWIFIEX_SKB_DMA_ADDR(skb), + MWIFIEX_SLEEP_COOKIE_SIZE, + PCI_DMA_FROMDEVICE); if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { @@ -1707,6 +1734,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) } mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); + mwifiex_unmap_pci_memory(adapter, skb, + PCI_DMA_FROMDEVICE); + skb_pull(skb, INTF_HEADER_LEN); while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); @@ -1724,6 +1754,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) PCI_DMA_FROMDEVICE)) return -1; } else if (mwifiex_pcie_ok_to_access_hw(adapter)) { + skb_pull(skb, INTF_HEADER_LEN); adapter->curr_cmd->resp_skb = skb; adapter->cmd_resp_received = true; /* Take the pointer and set it to CMD node and will @@ -2325,79 +2356,41 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } } } - while (pcie_ireg & HOST_INTR_MASK) { - if (pcie_ireg & HOST_INTR_DNLD_DONE) { - pcie_ireg &= ~HOST_INTR_DNLD_DONE; - mwifiex_dbg(adapter, INTR, - "info: TX DNLD Done\n"); - ret = mwifiex_pcie_send_data_complete(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_UPLD_RDY) { - pcie_ireg &= ~HOST_INTR_UPLD_RDY; - mwifiex_dbg(adapter, INTR, - "info: Rx DATA\n"); - ret = mwifiex_pcie_process_recv_data(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_EVENT_RDY) { - pcie_ireg &= ~HOST_INTR_EVENT_RDY; - mwifiex_dbg(adapter, INTR, - "info: Rx EVENT\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - if (ret) - return ret; - } - - if (pcie_ireg & HOST_INTR_CMD_DONE) { - pcie_ireg &= ~HOST_INTR_CMD_DONE; - if (adapter->cmd_sent) { - mwifiex_dbg(adapter, INTR, - "info: CMD sent Interrupt\n"); - adapter->cmd_sent = false; - } - /* Handle command response */ - ret = mwifiex_pcie_process_cmd_complete(adapter); - if (ret) - return ret; - if (adapter->hs_activated) - return ret; - } - - if (card->msi_enable) { - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - } - - if (mwifiex_pcie_ok_to_access_hw(adapter)) { - if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, - &pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Read register failed\n"); - return -1; - } - - if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { - if (mwifiex_write_reg(adapter, - PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } - } + if (pcie_ireg & HOST_INTR_DNLD_DONE) { + pcie_ireg &= ~HOST_INTR_DNLD_DONE; + mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); + ret = mwifiex_pcie_send_data_complete(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_UPLD_RDY) { + pcie_ireg &= ~HOST_INTR_UPLD_RDY; + mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); + ret = mwifiex_pcie_process_recv_data(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_EVENT_RDY) { + pcie_ireg &= ~HOST_INTR_EVENT_RDY; + mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); + ret = mwifiex_pcie_process_event_ready(adapter); + if (ret) + return ret; + } 
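Dropping the drain loop removed above is safe because causes that latch while these handlers run raise a fresh interrupt: the top half records them and re-queues the main process, which calls back in here on its next pass (the HOST_INTR_CMD_DONE leg continues below). Roughly the top-half pattern this relies on, as a sketch rather than the literal handler:

        /* Sketch: record the cause bits and kick the main process. */
        spin_lock_irqsave(&adapter->int_lock, flags);
        adapter->int_status |= pcie_ireg;
        spin_unlock_irqrestore(&adapter->int_lock, flags);

        mwifiex_queue_main_work(adapter);
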
+ if (pcie_ireg & HOST_INTR_CMD_DONE) { + pcie_ireg &= ~HOST_INTR_CMD_DONE; + if (adapter->cmd_sent) { + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); + adapter->cmd_sent = false; } - if (!card->msi_enable) { - spin_lock_irqsave(&adapter->int_lock, flags); - pcie_ireg |= adapter->int_status; - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - } + /* Handle command response */ + ret = mwifiex_pcie_process_cmd_complete(adapter); + if (ret) + return ret; } + mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); @@ -2715,31 +2708,35 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) { - mwifiex_drv_info_dump(adapter); + int drv_info_size; + void *drv_info; + + drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); mwifiex_pcie_fw_dump(adapter); - mwifiex_upload_device_dump(adapter); + mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); } -static unsigned long iface_work_flags; -static struct mwifiex_adapter *save_adapter; static void mwifiex_pcie_work(struct work_struct *work) { + struct pcie_service_card *card = + container_of(work, struct pcie_service_card, work); + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, - &iface_work_flags)) - mwifiex_pcie_device_dump_work(save_adapter); + &card->work_flags)) + mwifiex_pcie_device_dump_work(card->adapter); } -static DECLARE_WORK(pcie_work, mwifiex_pcie_work); /* This function dumps FW information */ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) + struct pcie_service_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); - schedule_work(&pcie_work); + schedule_work(&card->work); } /* @@ -2752,7 +2749,7 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) * - Allocate command response ring buffer * - Allocate sleep cookie buffer */ -static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) +static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; int ret; @@ -2861,13 +2858,16 @@ err_enable_dev: * - Command response ring buffer * - Sleep cookie buffer */ -static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) +static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + int ret; + u32 fw_status; - if (user_rmmod) { + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) @@ -3058,7 +3058,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) * - Allocate event BD ring buffers * - Allocate command response ring buffer * - Allocate sleep cookie buffer - * Part of mwifiex_pcie_init(), not reset the PCIE registers + * Part of mwifiex_init_pcie(), not reset the PCIE registers */ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { @@ -3067,6 +3067,17 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) struct pci_dev 
*pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + /* Bluetooth is not on pcie interface. Download Wifi only firmware + * during pcie FLR, so that bluetooth part of firmware which is + * already running doesn't get affected. + */ + strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME); + + /* tx_buf_size might be changed to 3584 by firmware during + * data transfer, we should reset it to default size. + */ + adapter->tx_buf_size = card->pcie.tx_buf_size; + card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); if (ret) { @@ -3128,7 +3139,6 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); adapter->seq_num = 0; - adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; if (reg->sleep_cookie) mwifiex_pcie_delete_sleep_cookie_buf(adapter); @@ -3141,8 +3151,8 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) } static struct mwifiex_if_ops pcie_ops = { - .init_if = mwifiex_pcie_init, - .cleanup_if = mwifiex_pcie_cleanup, + .init_if = mwifiex_init_pcie, + .cleanup_if = mwifiex_cleanup_pcie, .check_fw_status = mwifiex_check_fw_status, .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, @@ -3168,49 +3178,7 @@ static struct mwifiex_if_ops pcie_ops = { .up_dev = mwifiex_pcie_up_dev, }; -/* - * This function initializes the PCIE driver module. - * - * This registers the device with PCIE bus. - */ -static int mwifiex_pcie_init_module(void) -{ - int ret; - - pr_debug("Marvell PCIe Driver\n"); - - /* Clear the flag in case user removes the card. */ - user_rmmod = 0; - - ret = pci_register_driver(&mwifiex_pcie); - if (ret) - pr_err("Driver register failed!\n"); - else - pr_debug("info: Driver registered successfully!\n"); - - return ret; -} - -/* - * This function cleans up the PCIE driver. - * - * The following major steps are followed for cleanup - - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from PCIE bus. - */ -static void mwifiex_pcie_cleanup_module(void) -{ - /* Set the flag as user is removing this module. 
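The hand-rolled init/exit hooks being deleted here hold no logic beyond registration once user_rmmod is gone, which is exactly what module_pci_driver() (used at the end of this hunk) generates. It expands to approximately:

        static int __init mwifiex_pcie_init(void)
        {
                return pci_register_driver(&mwifiex_pcie);
        }
        module_init(mwifiex_pcie_init);

        static void __exit mwifiex_pcie_exit(void)
        {
                pci_unregister_driver(&mwifiex_pcie);
        }
        module_exit(mwifiex_pcie_exit);
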
*/ - user_rmmod = 1; - - cancel_work_sync(&pcie_work); - pci_unregister_driver(&mwifiex_pcie); -} - -module_init(mwifiex_pcie_init_module); -module_exit(mwifiex_pcie_cleanup_module); +module_pci_driver(mwifiex_pcie); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index ae3365d1c34e..00e8ee5ad4a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -116,6 +116,7 @@ /* FW awake cookie after FW ready */ #define FW_AWAKE_COOKIE (0xAA55AA55) #define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF +#define MWIFIEX_SLEEP_COOKIE_SIZE 4 #define MWIFIEX_MAX_DELAY_COUNT 100 struct mwifiex_pcie_card_reg { @@ -386,6 +387,8 @@ struct pcie_service_card { #endif struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS]; struct mwifiex_msix_context share_irq_ctx; + struct work_struct work; + unsigned long work_flags; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 740d79cd91fa..a4b356d267f9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -31,23 +31,9 @@ #define SDIO_VERSION "1.0" -/* The mwifiex_sdio_remove() callback function is called when - * user removes this module from kernel space or ejects - * the card from the slot. The driver handles these 2 cases - * differently. - * If the user is removing the module, the few commands (FUNC_SHUTDOWN, - * HS_CANCEL etc.) are sent to the firmware. - * If the card is removed, there is no need to send these command. - * - * The variable 'user_rmmod' is used to distinguish these two - * scenarios. This flag is initialized as FALSE in case the card - * is removed, and will be set to TRUE for module removal when - * module_exit function is called. - */ -static u8 user_rmmod; +static void mwifiex_sdio_work(struct work_struct *work); static struct mwifiex_if_ops sdio_ops; -static unsigned long iface_work_flags; static struct memory_type_mapping generic_mem_type_map[] = { {"DUMP", NULL, 0, 0xDD}, @@ -116,7 +102,6 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) init_completion(&card->fw_done); card->func = func; - card->device_id = id; func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; @@ -136,6 +121,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->fw_dump_enh = data->fw_dump_enh; card->can_auto_tdls = data->can_auto_tdls; card->can_ext_scan = data->can_ext_scan; + INIT_WORK(&card->work, mwifiex_sdio_work); } sdio_claim_host(func); @@ -212,6 +198,171 @@ static int mwifiex_sdio_resume(struct device *dev) return 0; } +/* Write data into SDIO card register. Caller claims SDIO device. */ +static int +mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data) +{ + int ret = -1; + + sdio_writeb(func, data, reg, &ret); + return ret; +} + +/* This function writes data into SDIO card register. + */ +static int +mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + + sdio_claim_host(card->func); + ret = mwifiex_write_reg_locked(card->func, reg, data); + sdio_release_host(card->func); + + return ret; +} + +/* This function reads data from SDIO card register. 
+ */ +static int +mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) +{ + struct sdio_mmc_card *card = adapter->card; + int ret = -1; + u8 val; + + sdio_claim_host(card->func); + val = sdio_readb(card->func, reg, &ret); + sdio_release_host(card->func); + + *data = val; + + return ret; +} + +/* This function writes multiple data into SDIO card memory. + * + * This does not work in suspended mode. + */ +static int +mwifiex_write_data_sync(struct mwifiex_adapter *adapter, + u8 *buffer, u32 pkt_len, u32 port) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + u8 blk_mode = + (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; + u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; + u32 blk_cnt = + (blk_mode == + BLOCK_MODE) ? (pkt_len / + MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; + u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); + + if (adapter->is_suspended) { + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); + return -1; + } + + sdio_claim_host(card->func); + + ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size); + + sdio_release_host(card->func); + + return ret; +} + +/* This function reads multiple data from SDIO card memory. + */ +static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, + u32 len, u32 port, u8 claim) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE + : BLOCK_MODE; + u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; + u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) + : len; + u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); + + if (claim) + sdio_claim_host(card->func); + + ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size); + + if (claim) + sdio_release_host(card->func); + + return ret; +} + +/* This function reads the firmware status. + */ +static int +mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) +{ + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u8 fws0, fws1; + + if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) + return -1; + + if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) + return -1; + + *dat = (u16)((fws1 << 8) | fws0); + return 0; +} + +/* This function checks the firmware status in card. + */ +static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, + u32 poll_num) +{ + int ret = 0; + u16 firmware_stat; + u32 tries; + + for (tries = 0; tries < poll_num; tries++) { + ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); + if (ret) + continue; + if (firmware_stat == FIRMWARE_READY_SDIO) { + ret = 0; + break; + } + + msleep(100); + ret = -1; + } + + return ret; +} + +/* This function checks if WLAN is the winner. + */ +static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + int ret = 0; + u8 winner = 0; + struct sdio_mmc_card *card = adapter->card; + + if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) + return -1; + + if (winner) + adapter->winner = 0; + else + adapter->winner = 1; + + return ret; +} + /* * SDIO remove. 
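The block of SDIO register helpers above was moved up the file rather than rewritten: mwifiex_sdio_remove(), in the hunk just below, now needs mwifiex_sdio_read_fw_status() ahead of its old definition point. The remove-time check mirrors the PCIe side (note the patch assigns ret but does not test it, so a failed read leaves firmware_stat unset):

        ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
        if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) {
                mwifiex_deauthenticate_all(adapter);
                /* ...then FUNC_SHUTDOWN, as before */
        }
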
* @@ -223,6 +374,8 @@ mwifiex_sdio_remove(struct sdio_func *func) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; struct mwifiex_private *priv; + int ret = 0; + u16 firmware_stat; card = sdio_get_drvdata(func); if (!card) @@ -234,9 +387,12 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; + cancel_work_sync(&card->work); + mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); - if (user_rmmod && !adapter->mfg_mode) { + ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); + if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -364,111 +520,6 @@ static struct sdio_driver mwifiex_sdio = { } }; -/* Write data into SDIO card register. Caller claims SDIO device. */ -static int -mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data) -{ - int ret = -1; - sdio_writeb(func, data, reg, &ret); - return ret; -} - -/* - * This function writes data into SDIO card register. - */ -static int -mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - - sdio_claim_host(card->func); - ret = mwifiex_write_reg_locked(card->func, reg, data); - sdio_release_host(card->func); - - return ret; -} - -/* - * This function reads data from SDIO card register. - */ -static int -mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) -{ - struct sdio_mmc_card *card = adapter->card; - int ret = -1; - u8 val; - - sdio_claim_host(card->func); - val = sdio_readb(card->func, reg, &ret); - sdio_release_host(card->func); - - *data = val; - - return ret; -} - -/* - * This function writes multiple data into SDIO card memory. - * - * This does not work in suspended mode. - */ -static int -mwifiex_write_data_sync(struct mwifiex_adapter *adapter, - u8 *buffer, u32 pkt_len, u32 port) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - u8 blk_mode = - (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE; - u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; - u32 blk_cnt = - (blk_mode == - BLOCK_MODE) ? (pkt_len / - MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; - u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); - - if (adapter->is_suspended) { - mwifiex_dbg(adapter, ERROR, - "%s: not allowed while suspended\n", __func__); - return -1; - } - - sdio_claim_host(card->func); - - ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size); - - sdio_release_host(card->func); - - return ret; -} - -/* - * This function reads multiple data from SDIO card memory. - */ -static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, - u32 len, u32 port, u8 claim) -{ - struct sdio_mmc_card *card = adapter->card; - int ret; - u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE - : BLOCK_MODE; - u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1; - u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) - : len; - u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); - - if (claim) - sdio_claim_host(card->func); - - ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size); - - if (claim) - sdio_release_host(card->func); - - return ret; -} - /* * This function wakes up the card. * @@ -755,27 +806,6 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) } /* - * This function reads the firmware status. 
- */ -static int -mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) -{ - struct sdio_mmc_card *card = adapter->card; - const struct mwifiex_sdio_card_reg *reg = card->reg; - u8 fws0, fws1; - - if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) - return -1; - - if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) - return -1; - - *dat = (u16) ((fws1 << 8) | fws0); - - return 0; -} - -/* * This function disables the host interrupt. * * The host interrupt mask is read, the disable bit is reset and @@ -1080,51 +1110,6 @@ done: } /* - * This function checks the firmware status in card. - */ -static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, - u32 poll_num) -{ - int ret = 0; - u16 firmware_stat; - u32 tries; - - for (tries = 0; tries < poll_num; tries++) { - ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); - if (ret) - continue; - if (firmware_stat == FIRMWARE_READY_SDIO) { - ret = 0; - break; - } else { - msleep(100); - ret = -1; - } - } - - return ret; -} - -/* This function checks if WLAN is the winner. - */ -static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) -{ - int ret = 0; - u8 winner = 0; - struct sdio_mmc_card *card = adapter->card; - - if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) - return -1; - - if (winner) - adapter->winner = 0; - else - adapter->winner = 1; - - return ret; -} - -/* * This function decode sdio aggreation pkt. * * Based on the the data block size and pkt_len, @@ -2204,54 +2189,25 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) port, card->mp_data_port_mask); } -static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) +static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) { + struct sdio_mmc_card *card = adapter->card; struct sdio_func *func = card->func; - const struct sdio_device_id *device_id = card->device_id; - - /* TODO mmc_hw_reset does not require destroying and re-probing the - * whole adapter. Hence there was no need to for this rube-goldberg - * design to reload the fw from an external workqueue. If we don't - * destroy the adapter we could reload the fw from - * mwifiex_main_work_queue directly. - * The real difficulty with fw reset is to restore all the user - * settings applied through ioctl. By destroying and recreating the - * adapter, we take the easy way out, since we rely on user space to - * restore them. We assume that user space will treat the new - * incarnation of the adapter(interfaces) as if they had been just - * discovered and initializes them from scratch. - */ - mwifiex_sdio_remove(func); - - /* - * Normally, we would let the driver core take care of releasing these. - * But we're not letting the driver core handle this one. See above - * TODO. - */ - sdio_set_drvdata(func, NULL); - devm_kfree(&func->dev, card); + mwifiex_shutdown_sw(adapter); /* power cycle the adapter */ sdio_claim_host(func); mmc_hw_reset(func->card->host); sdio_release_host(func); - mwifiex_sdio_probe(func, device_id); -} - -static struct mwifiex_adapter *save_adapter; -static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) -{ - struct sdio_mmc_card *card = adapter->card; - - /* TODO card pointer is unprotected. If the adapter is removed - * physically, sdio core might trigger mwifiex_sdio_remove, before this - * workqueue is run, which will destroy the adapter struct. 
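The reset path stops destroying and re-probing the whole adapter (the rube-goldberg TODO above): it now shuts the software state down, power-cycles the card, drops stale deferred work, and re-initializes in place. In outline:

        mwifiex_shutdown_sw(adapter);

        /* power cycle the SDIO card */
        sdio_claim_host(func);
        mmc_hw_reset(func->card->host);
        sdio_release_host(func);

        /* cancel anything the old instance had queued */
        clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
        clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);

        mwifiex_reinit_sw(adapter);
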
When this - * workqueue eventually exceutes it will dereference an invalid adapter - * pointer + /* Previous save_adapter won't be valid after this. We will cancel + * pending work requests. */ - mwifiex_recreate_adapter(card); + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); + + mwifiex_reinit_sw(adapter); } /* This function read/write firmware */ @@ -2542,47 +2498,53 @@ done: static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + int drv_info_size; + void *drv_info; - mwifiex_drv_info_dump(adapter); + drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); if (card->fw_dump_enh) mwifiex_sdio_generic_fw_dump(adapter); else mwifiex_sdio_fw_dump(adapter); - mwifiex_upload_device_dump(adapter); + mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); } static void mwifiex_sdio_work(struct work_struct *work) { + struct sdio_mmc_card *card = + container_of(work, struct sdio_mmc_card, work); + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, - &iface_work_flags)) - mwifiex_sdio_device_dump_work(save_adapter); + &card->work_flags)) + mwifiex_sdio_device_dump_work(card->adapter); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, - &iface_work_flags)) - mwifiex_sdio_card_reset_work(save_adapter); + &card->work_flags)) + mwifiex_sdio_card_reset_work(card->adapter); } -static DECLARE_WORK(sdio_work, mwifiex_sdio_work); /* This function resets the card */ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags)) + struct sdio_mmc_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - schedule_work(&sdio_work); + schedule_work(&card->work); } /* This function dumps FW information */ static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter) { - save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) + struct sdio_mmc_card *card = adapter->card; + + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); - schedule_work(&sdio_work); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + schedule_work(&card->work); } /* Function to dump SDIO function registers and SDIO scratch registers in case @@ -2678,6 +2640,33 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) return p - drv_buf; } +/* sdio device/function initialization, code is extracted + * from init_if handler and register_dev handler. + */ +static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter) +{ + struct sdio_mmc_card *card = adapter->card; + u8 sdio_ireg; + + sdio_claim_host(card->func); + sdio_enable_func(card->func); + sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); + sdio_release_host(card->func); + + /* tx_buf_size might be changed to 3584 by firmware during + * data transfer, we will reset to default size. + */ + adapter->tx_buf_size = card->tx_buf_size; + + /* Read the host_int_status_reg for ACK the first interrupt got + * from the bootloader. If we don't do this we get a interrupt + * as soon as we register the irq. 
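Embedding the work item in the card structure is what lets the file-scope save_adapter and iface_work_flags globals disappear: the handler recovers its owning card with container_of(), so two plugged-in cards no longer share (and race on) one flag word. The pattern, common to the SDIO and PCIe halves of this patch:

        /* probe: */
        INIT_WORK(&card->work, mwifiex_sdio_work);

        /* requester: set a per-card flag, schedule the per-card work */
        set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
        schedule_work(&card->work);

        /* handler: recover the owning card, no globals involved */
        static void mwifiex_sdio_work(struct work_struct *work)
        {
                struct sdio_mmc_card *card =
                        container_of(work, struct sdio_mmc_card, work);
                /* ... test_and_clear_bit() on card->work_flags ... */
        }
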
+ */ + mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); + + mwifiex_init_sdio_ioport(adapter); +} + static struct mwifiex_if_ops sdio_ops = { .init_if = mwifiex_init_sdio, .cleanup_if = mwifiex_cleanup_sdio, @@ -2703,43 +2692,10 @@ static struct mwifiex_if_ops sdio_ops = { .reg_dump = mwifiex_sdio_reg_dump, .device_dump = mwifiex_sdio_device_dump, .deaggr_pkt = mwifiex_deaggr_sdio_pkt, + .up_dev = mwifiex_sdio_up_dev, }; -/* - * This function initializes the SDIO driver. - * - * This registers the device with SDIO bus. - */ -static int -mwifiex_sdio_init_module(void) -{ - /* Clear the flag in case user removes the card. */ - user_rmmod = 0; - - return sdio_register_driver(&mwifiex_sdio); -} - -/* - * This function cleans up the SDIO driver. - * - * The following major steps are followed for cleanup - - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from SDIO bus. - */ -static void -mwifiex_sdio_cleanup_module(void) -{ - /* Set the flag as user is removing this module. */ - user_rmmod = 1; - cancel_work_sync(&sdio_work); - - sdio_unregister_driver(&mwifiex_sdio); -} - -module_init(mwifiex_sdio_init_module); -module_exit(mwifiex_sdio_cleanup_module); +module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index cdbf3a3ac7f9..dccf7fd1aef3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -268,8 +268,8 @@ struct sdio_mmc_card { struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; - /* needed for card reset */ - const struct sdio_device_id *device_id; + struct work_struct work; + unsigned long work_flags; }; struct mwifiex_sdio_device { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 125e448712dd..2f1f4d190b28 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -76,7 +76,7 @@ mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv, */ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, - u16 cmd_action, u16 *action) + u16 cmd_action, u32 *action) { struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl; @@ -89,7 +89,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN); - mac_ctrl->action = cpu_to_le16(*action); + mac_ctrl->action = cpu_to_le32(*action); return 0; } @@ -1935,8 +1935,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, mwifiex_dbg(priv->adapter, ERROR, "0x%x command not supported by firmware\n", cmd_no); - return -EOPNOTSUPP; - } + return -EOPNOTSUPP; + } /* Prepare command */ switch (cmd_no) { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 9df0c4dc06ed..96503d3d053f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -1009,6 +1009,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->event_skb->len - sizeof(eventcause)); break; + /* Debugging 
event; not used, but let's not print an ERROR for it. */ + case EVENT_UNKNOWN_DEBUG: + mwifiex_dbg(adapter, EVENT, "event: debug\n"); + break; default: mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n", eventcause); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index c563160b3b6b..9cf3334adf4d 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -22,7 +22,6 @@ #define USB_VERSION "1.0" -static u8 user_rmmod; static struct mwifiex_if_ops usb_ops; static struct usb_device_id mwifiex_usb_table[] = { @@ -618,7 +617,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) if (!adapter || !adapter->priv_num) return; - if (user_rmmod && !adapter->mfg_mode) { + if (card->udev->state != USB_STATE_NOTATTACHED && !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, @@ -1201,43 +1200,7 @@ static struct mwifiex_if_ops usb_ops = { .is_port_ready = mwifiex_usb_is_port_ready, }; -/* This function initializes the USB driver module. - * - * This registers the device with USB bus. - */ -static int mwifiex_usb_init_module(void) -{ - int ret; - - pr_debug("Marvell USB8797 Driver\n"); - - ret = usb_register(&mwifiex_usb_driver); - if (ret) - pr_err("Driver register failed!\n"); - else - pr_debug("info: Driver registered successfully!\n"); - - return ret; -} - -/* This function cleans up the USB driver. - * - * The following major steps are followed in .disconnect for cleanup: - * - Resume the device if its suspended - * - Disconnect the device if connected - * - Shutdown the firmware - * - Unregister the device from USB bus. - */ -static void mwifiex_usb_cleanup_module(void) -{ - /* set the flag as user is removing this module */ - user_rmmod = 1; - - usb_deregister(&mwifiex_usb_driver); -} - -module_init(mwifiex_usb_init_module); -module_exit(mwifiex_usb_cleanup_module); +module_usb_driver(mwifiex_usb_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION); diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 18fbb96a46e9..b1ab8da121dd 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -146,21 +146,6 @@ int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter) } /* - * Firmware shutdown complete callback handler. - * - * This function sets the hardware status to not ready and wakes up - * the function waiting on the init wait queue for the firmware - * shutdown to complete. - */ -int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter) -{ - adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY; - adapter->init_wait_q_woken = true; - wake_up_interruptible(&adapter->init_wait_q); - return 0; -} - -/* * This function sends init/shutdown command * to firmware. 
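The USB driver loses user_rmmod the same way, but there is no firmware-status register to poll over USB; the disconnect handler instead checks whether the device is still physically on the bus before talking to it. Condensed from the usb.c hunk above:

        /* Skip deauth/FUNC_SHUTDOWN if the device is already gone. */
        if (card->udev->state != USB_STATE_NOTATTACHED &&
            !adapter->mfg_mode) {
                mwifiex_deauthenticate_all(adapter);
                mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
                                                          MWIFIEX_BSS_ROLE_ANY),
                                         MWIFIEX_FUNC_SHUTDOWN);
        }
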
*/ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 95c1d7c0a2f3..256496bfbafb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -72,6 +72,7 @@ #define RF5592 0x000f #define RF3070 0x3070 #define RF3290 0x3290 +#define RF5350 0x5350 #define RF5360 0x5360 #define RF5362 0x5362 #define RF5370 0x5370 @@ -2286,6 +2287,8 @@ struct mac_iveiv_entry { #define RFCSR30_RX_H20M FIELD8(0x04) #define RFCSR30_RX_VCM FIELD8(0x18) #define RFCSR30_RF_CALIBRATION FIELD8(0x80) +#define RF3322_RFCSR30_TX_H20M FIELD8(0x01) +#define RF3322_RFCSR30_RX_H20M FIELD8(0x02) /* * RFCSR 31: @@ -2301,6 +2304,12 @@ struct mac_iveiv_entry { #define RFCSR36_RF_BS FIELD8(0x80) /* + * RFCSR 34: + */ +#define RFCSR34_TX0_EXT_PA FIELD8(0x04) +#define RFCSR34_TX1_EXT_PA FIELD8(0x08) + +/* * RFCSR 38: */ #define RFCSR38_RX_LO1_EN FIELD8(0x20) @@ -2312,6 +2321,18 @@ struct mac_iveiv_entry { #define RFCSR39_RX_LO2_EN FIELD8(0x80) /* + * RFCSR 41: + */ +#define RFCSR41_BIT1 FIELD8(0x01) +#define RFCSR41_BIT4 FIELD8(0x08) + +/* + * RFCSR 42: + */ +#define RFCSR42_BIT1 FIELD8(0x01) +#define RFCSR42_BIT4 FIELD8(0x08) + +/* * RFCSR 49: */ #define RFCSR49_TX FIELD8(0x3f) @@ -2324,6 +2345,8 @@ struct mac_iveiv_entry { * RFCSR 50: */ #define RFCSR50_TX FIELD8(0x3f) +#define RFCSR50_TX0_EXT_PA FIELD8(0x02) +#define RFCSR50_TX1_EXT_PA FIELD8(0x10) #define RFCSR50_EP FIELD8(0xc0) /* bits for RT3593 */ #define RFCSR50_TX_LO1_EN FIELD8(0x20) @@ -2471,6 +2494,8 @@ enum rt2800_eeprom_word { * INTERNAL_TX_ALC: 0: disable, 1: enable * BT_COEXIST: 0: disable, 1: enable * DAC_TEST: 0: disable, 1: enable + * EXTERNAL_TX0_PA: 0: disable, 1: enable (only on RT3352) + * EXTERNAL_TX1_PA: 0: disable, 1: enable (only on RT3352) */ #define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001) #define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002) @@ -2487,6 +2512,8 @@ enum rt2800_eeprom_word { #define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000) #define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000) #define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000) +#define EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352 FIELD16(0x4000) +#define EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352 FIELD16(0x8000) /* * EEPROM frequency @@ -2979,7 +3006,9 @@ struct rt2800_drv_data { u8 bbp26; u8 txmixer_gain_24g; u8 txmixer_gain_5g; + u8 max_psdu; unsigned int tbtt_tick; + unsigned int ampdu_factor_cnt[4]; DECLARE_BITMAP(sta_ids, STA_IDS_SIZE); }; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 4fb79e05078f..572cdea4ca25 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -373,9 +373,6 @@ static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) int i, count; rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - rt2x00_set_field32(®, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); rt2x00_set_field32(®, FRC_WL_ANT_SET, 1); rt2x00_set_field32(®, WLAN_CLK_EN, 0); @@ -967,8 +964,6 @@ static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev) bcn_num++; } - WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing); - rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg); rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32)); @@ -1418,6 +1413,23 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, } EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key); +static void rt2800_set_max_psdu_len(struct 
rt2x00_dev *rt2x00dev) +{ + u8 i, max_psdu; + u32 reg; + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + + for (i = 0; i < 3; i++) + if (drv_data->ampdu_factor_cnt[i] > 0) + break; + + max_psdu = min(drv_data->max_psdu, i); + + rt2800_register_read(rt2x00dev, MAX_LEN_CFG, ®); + rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, max_psdu); + rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); +} + int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { @@ -1426,6 +1438,17 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; /* + * Limit global maximum TX AMPDU length to smallest value of all + * connected stations. In AP mode this can be suboptimal, but we + * do not have a choice if some connected STA is not capable to + * receive the same amount of data like the others. + */ + if (sta->ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++; + rt2800_set_max_psdu_len(rt2x00dev); + } + + /* * Search for the first free WCID entry and return the corresponding * index. */ @@ -1457,9 +1480,16 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2800_sta_add); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid) +int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + int wcid = sta_priv->wcid; + + if (sta->ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--; + rt2800_set_max_psdu_len(rt2x00dev); + } if (wcid > WCID_END) return 0; @@ -1902,9 +1932,14 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, rt2x00dev->lna_gain = lna_gain; } +static inline bool rt2800_clk_is_20mhz(struct rt2x00_dev *rt2x00dev) +{ + return clk_get_rate(rt2x00dev->clk) == 20000000; +} + #define FREQ_OFFSET_BOUND 0x5f -static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) +static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev) { u8 freq_offset, prev_freq_offset; u8 rfcsr, prev_rfcsr; @@ -2075,7 +2110,9 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); - msleep(1); + + usleep_range(1000, 1500); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); } @@ -2380,7 +2417,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, } rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (conf_is_ht40(conf)) { txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40, @@ -2570,7 +2607,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (rf->channel <= 14) { if (rf->channel == 6) @@ -2611,7 +2648,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev, else rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); @@ -2676,7 +2713,7 @@ static void 
rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); if (rf->channel <= 14) { int idx = rf->channel-1; @@ -2723,6 +2760,13 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); + } else if (rt2x00_rt(rt2x00dev, RT5350)) { + static const char r59_non_bt[] = {0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, + 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; + + rt2800_rfcsr_write(rt2x00dev, 59, + r59_non_bt[idx]); } } } @@ -2971,7 +3015,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev, } /* TODO proper frequency adjustment */ - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); /* TODO merge with others */ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); @@ -3160,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info); break; case RF3070: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -3178,6 +3223,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rf(rt2x00dev, RF3070) || rt2x00_rf(rt2x00dev, RF3290) || rt2x00_rf(rt2x00dev, RF3322) || + rt2x00_rf(rt2x00dev, RF5350) || rt2x00_rf(rt2x00dev, RF5360) || rt2x00_rf(rt2x00dev, RF5362) || rt2x00_rf(rt2x00dev, RF5370) || @@ -3185,8 +3231,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_rf(rt2x00dev, RF5390) || rt2x00_rf(rt2x00dev, RF5392)) { rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0); - rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0); + if (rt2x00_rf(rt2x00dev, RF3322)) { + rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M, + conf_is_ht40(conf)); + rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_RX_H20M, + conf_is_ht40(conf)); + } else { + rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, + conf_is_ht40(conf)); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, + conf_is_ht40(conf)); + } rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); @@ -3197,11 +3252,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, /* * Change BBP settings */ + if (rt2x00_rt(rt2x00dev, RT3352)) { + rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 27, 0x0); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x20); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 83, 0x6a); } else if (rt2x00_rt(rt2x00dev, RT3593)) { if (rf->channel > 14) { /* Disable CCK Packet detection on 5GHz */ @@ -3407,7 +3469,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, } } - msleep(1); + usleep_range(1000, 1500); /* * Clear channel statistic counters @@ -3419,7 +3481,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, /* * Clear update flag */ - if (rt2x00_rt(rt2x00dev, RT3352)) { + if (rt2x00_rt(rt2x00dev, RT3352) || + rt2x00_rt(rt2x00dev, RT5350)) { rt2800_bbp_read(rt2x00dev, 49, &bbp); rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0); rt2800_bbp_write(rt2x00dev, 49, bbp); @@ -4300,21 +4363,25 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) case RF3053: case RF3070: case RF3290: + case 
RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: + case RF5592: rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); break; default: + WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration", + rt2x00dev->chip.rf); return; } - mdelay(1); + usleep_range(1000, 1500); rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin); if (rt2x00dev->rf_channel <= 14) { @@ -4536,6 +4603,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner); */ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) { + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u16 eeprom; unsigned int i; @@ -4678,6 +4746,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); + } else if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -4702,14 +4772,18 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); - if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || - rt2x00_rt(rt2x00dev, RT2883) || - rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) - rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); - else - rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); - rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0); - rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0); + if (rt2x00_is_usb(rt2x00dev)) { + drv_data->max_psdu = 3; + } else if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || + rt2x00_rt(rt2x00dev, RT2883) || + rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) { + drv_data->max_psdu = 2; + } else { + drv_data->max_psdu = 1; + } + rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, drv_data->max_psdu); + rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10); + rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); rt2800_register_read(rt2x00dev, LED_CFG, &reg); @@ -4725,8 +4799,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg); - rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15); - rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31); + rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2); + rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2); rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000); rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0); rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0); @@ -4858,10 +4932,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg); rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); - rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32); + rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7); rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, IEEE80211_MAX_RTS_THRESHOLD); - rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0); + rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 1); rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); @@ -5319,9 +5393,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 82, 0x62); - rt2800_bbp_write(rt2x00dev, 83, 0x6a); - - rt2800_bbp_write(rt2x00dev, 84, 0x99); + if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + } else { + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + rt2800_bbp_write(rt2x00dev, 84, 0x99); + } rt2800_bbp_write(rt2x00dev, 86, 0x38); @@ -5335,9 +5413,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 104, 0x92); - rt2800_bbp_write(rt2x00dev, 105, 0x34); - - rt2800_bbp_write(rt2x00dev, 106, 0x05); + if (rt2x00_rt(rt2x00dev, RT5350)) { + rt2800_bbp_write(rt2x00dev, 105, 0x3c); + rt2800_bbp_write(rt2x00dev, 106, 0x03); + } else { + rt2800_bbp_write(rt2x00dev, 105, 0x34); + rt2800_bbp_write(rt2x00dev, 106, 0x05); + } rt2800_bbp_write(rt2x00dev, 120, 0x50); @@ -5362,6 +5444,16 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 143, 0xa2); rt2800_bbp_write(rt2x00dev, 148, 0xc8); + + if (rt2x00_rt(rt2x00dev, RT5350)) { + /* Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 150, 0x40); + /* Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 151, 0x30); + rt2800_bbp_write(rt2x00dev, 152, 0xa3); + /* Clear previously selected antenna */ + rt2800_bbp_write(rt2x00dev, 154, 0); + } } static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) @@ -5662,6 +5754,7 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_init_bbp_3290(rt2x00dev); break; case RT3352: + case RT5350: rt2800_init_bbp_3352(rt2x00dev); break; case RT3390: @@ -6135,6 +6228,12 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) { + int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0, + &rt2x00dev->cap_flags); + int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1, + &rt2x00dev->cap_flags); + u8 rfcsr; + rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); @@ -6170,15 +6269,30 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); - rt2800_rfcsr_write(rt2x00dev, 34, 0x01); + rfcsr = 0x01; + if (!tx0_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1); + if (!tx1_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1); + rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); rt2800_rfcsr_write(rt2x00dev, 35, 0x03); rt2800_rfcsr_write(rt2x00dev, 36, 0xbd); rt2800_rfcsr_write(rt2x00dev, 37, 0x3c); rt2800_rfcsr_write(rt2x00dev, 38, 0x5f); rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); rt2800_rfcsr_write(rt2x00dev, 40, 0x33); - rt2800_rfcsr_write(rt2x00dev, 41, 0x5b); - rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); + rfcsr = 0x52; + if (tx0_int_pa) { + rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1); + rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1); + } + rt2800_rfcsr_write(rt2x00dev, 41, rfcsr); + rfcsr = 0x52; + if (tx1_int_pa) { + rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1); + rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1); + } + rt2800_rfcsr_write(rt2x00dev, 42, rfcsr); rt2800_rfcsr_write(rt2x00dev, 43, 0xdb); rt2800_rfcsr_write(rt2x00dev, 44, 0xdb); rt2800_rfcsr_write(rt2x00dev, 45, 0xdb); @@ -6186,15 +6300,20 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 47, 0x0d); rt2800_rfcsr_write(rt2x00dev, 48, 0x14); rt2800_rfcsr_write(rt2x00dev, 49, 0x00); - rt2800_rfcsr_write(rt2x00dev, 50, 0x2d); - 
rt2800_rfcsr_write(rt2x00dev, 51, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 52, 0x00); - rt2800_rfcsr_write(rt2x00dev, 53, 0x52); - rt2800_rfcsr_write(rt2x00dev, 54, 0x1b); - rt2800_rfcsr_write(rt2x00dev, 55, 0x7f); - rt2800_rfcsr_write(rt2x00dev, 56, 0x00); - rt2800_rfcsr_write(rt2x00dev, 57, 0x52); - rt2800_rfcsr_write(rt2x00dev, 58, 0x1b); + rfcsr = 0x2d; + if (!tx0_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1); + if (!tx1_int_pa) + rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1); + rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); + rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2)); + rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49)); + rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0)); rt2800_rfcsr_write(rt2x00dev, 59, 0x00); rt2800_rfcsr_write(rt2x00dev, 60, 0x00); rt2800_rfcsr_write(rt2x00dev, 61, 0x00); @@ -6415,7 +6534,7 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1); @@ -6446,6 +6565,76 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) /* TODO: enable stream mode support */ } +static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev) +{ + rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); + rt2800_rfcsr_write(rt2x00dev, 1, 0x23); + rt2800_rfcsr_write(rt2x00dev, 2, 0x50); + rt2800_rfcsr_write(rt2x00dev, 3, 0x08); + rt2800_rfcsr_write(rt2x00dev, 4, 0x49); + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 9, 0x02); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0x46); + if (rt2800_clk_is_20mhz(rt2x00dev)) + rt2800_rfcsr_write(rt2x00dev, 13, 0x1f); + else + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0xc0); + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); + rt2800_rfcsr_write(rt2x00dev, 19, 0x00); + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write(rt2x00dev, 27, 0x03); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0xd0); + rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); + 
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); + rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0c); + rt2800_rfcsr_write(rt2x00dev, 45, 0xa6); + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + rt2800_rfcsr_write(rt2x00dev, 47, 0x00); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x80); + rt2800_rfcsr_write(rt2x00dev, 50, 0x00); + rt2800_rfcsr_write(rt2x00dev, 51, 0x00); + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); + rt2800_rfcsr_write(rt2x00dev, 54, 0x38); + rt2800_rfcsr_write(rt2x00dev, 55, 0x43); + rt2800_rfcsr_write(rt2x00dev, 56, 0x82); + rt2800_rfcsr_write(rt2x00dev, 57, 0x00); + rt2800_rfcsr_write(rt2x00dev, 58, 0x39); + rt2800_rfcsr_write(rt2x00dev, 59, 0x0b); + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); +} + static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 2); @@ -6641,7 +6830,7 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 2, 0x80); msleep(1); - rt2800_adjust_freq_offset(rt2x00dev); + rt2800_freq_cal_mode1(rt2x00dev); /* Enable DC filter */ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) @@ -6683,6 +6872,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) case RT3593: rt2800_init_rfcsr_3593(rt2x00dev); break; + case RT5350: + rt2800_init_rfcsr_5350(rt2x00dev); + break; case RT5390: rt2800_init_rfcsr_5390(rt2x00dev); break; @@ -7060,6 +7252,10 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392)) rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); + else if (rt2x00_rt(rt2x00dev, RT3352)) + rf = RF3322; + else if (rt2x00_rt(rt2x00dev, RT5350)) + rf = RF5350; else rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); @@ -7078,6 +7274,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) case RF3290: case RF3320: case RF3322: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -7149,7 +7346,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Detect if this device has Bluetooth co-existence. 
*/ - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) + if (!rt2x00_rt(rt2x00dev, RT3352) && + rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) __set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags); /* @@ -7178,6 +7376,22 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) EIRP_MAX_TX_POWER_LIMIT) __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags); + /* + * Detect if device uses internal or external PA + */ + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + + if (rt2x00_rt(rt2x00dev, RT3352)) { + if (!rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352)) + __set_bit(CAPABILITY_INTERNAL_PA_TX0, + &rt2x00dev->cap_flags); + if (!rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352)) + __set_bit(CAPABILITY_INTERNAL_PA_TX1, + &rt2x00dev->cap_flags); + } + return 0; } @@ -7322,6 +7536,27 @@ static const struct rf_channel rf_vals_3x[] = { {173, 0x61, 0, 9}, }; +/* + * RF value list for rt3xxx with Xtal20MHz + * Supports: 2.4 GHz (all) (RF3322) + */ +static const struct rf_channel rf_vals_3x_xtal20[] = { + {1, 0xE2, 2, 0x14}, + {2, 0xE3, 2, 0x14}, + {3, 0xE4, 2, 0x14}, + {4, 0xE5, 2, 0x14}, + {5, 0xE6, 2, 0x14}, + {6, 0xE7, 2, 0x14}, + {7, 0xE8, 2, 0x14}, + {8, 0xE9, 2, 0x14}, + {9, 0xEA, 2, 0x14}, + {10, 0xEB, 2, 0x14}, + {11, 0xEC, 2, 0x14}, + {12, 0xED, 2, 0x14}, + {13, 0xEE, 2, 0x14}, + {14, 0xF0, 2, 0x18}, +}; + static const struct rf_channel rf_vals_5592_xtal20[] = { /* Channel, N, K, mod, R */ {1, 482, 4, 10, 3}, @@ -7470,6 +7705,13 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; /* + * Change default retry settings to values corresponding more closely + * to rate[0].count setting of minstrel rate control algorithm. + */ + rt2x00dev->hw->wiphy->retry_short = 2; + rt2x00dev->hw->wiphy->retry_long = 2; + + /* * Initialize all hw fields. */ ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS); @@ -7536,6 +7778,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3290: case RF3320: case RF3322: + case RF5350: case RF5360: case RF5362: case RF5370: @@ -7543,7 +7786,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF5390: case RF5392: spec->num_channels = 14; - spec->channels = rf_vals_3x; + if (rt2800_clk_is_20mhz(rt2x00dev)) + spec->channels = rf_vals_3x_xtal20; + else + spec->channels = rf_vals_3x; break; case RF3052: @@ -7593,7 +7839,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT; - spec->ht.ampdu_factor = 3; + spec->ht.ampdu_factor = (rx_chains > 1) ? 
3 : 2; spec->ht.ampdu_density = 4; spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (tx_chains != rx_chains) { @@ -7669,12 +7915,14 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3053: case RF3070: case RF3290: + case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: + case RF5592: __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags); break; } @@ -7708,6 +7956,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev) case RT3390: case RT3572: case RT3593: + case RT5350: case RT5390: case RT5392: case RT5592: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 83f1a44fb9b4..0a8b4df665fe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -183,7 +183,7 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct ieee80211_key_conf *key); int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid); +int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta); void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index f38c44061b5b..205a7b8ac8a7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -123,7 +123,7 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry) if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; - tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100)); + tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500)); if (unlikely(tout)) rt2x00_dbg(entry->queue->rt2x00dev, "TX status timeout for entry %d in queue %d\n", @@ -436,47 +436,6 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, } /* - * Watchdog handlers - */ -static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev) -{ - unsigned int i; - u32 reg; - - rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); - if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) { - rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n"); - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012); - - for (i = 0; i < 10; i++) { - udelay(10); - if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) - break; - } - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006); - } - - rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); - if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) { - rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n"); - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a); - - for (i = 0; i < 10; i++) { - udelay(10); - if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) - break; - } - - rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006); - } - - rt2x00usb_watchdog(rt2x00dev); -} - -/* - * TX descriptor initialization */ static __le32 *rt2800usb_get_txwi(struct queue_entry *entry) @@ -643,10 +602,9 @@ static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev) !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; - if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) || + rt2800usb_entry_txstatus_timeout(entry))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); - else if (rt2800usb_entry_txstatus_timeout(entry)) - rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); else break; } @@ -877,7 +835,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, - .watchdog = rt2800usb_watchdog, .start_queue = rt2800usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2800usb_stop_queue, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index aa3d4ceef4ad..ea299c4e7ada 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -39,6 +39,7 @@ #include <linux/hrtimer.h> #include <linux/average.h> #include <linux/usb.h> +#include <linux/clk.h> #include <net/mac80211.h> @@ -169,6 +170,7 @@ struct rt2x00_chip { #define RT3572 0x3572 #define RT3593 0x3593 #define RT3883 0x3883 /* WSOC */ +#define RT5350 0x5350 /* WSOC 2.4GHz */ #define RT5390 0x5390 /* 2.4GHz */ #define RT5392 0x5392 /* 2.4GHz */ #define RT5592 0x5592 @@ -627,7 +629,7 @@ struct rt2x00lib_ops { struct ieee80211_vif *vif, struct ieee80211_sta *sta); int (*sta_remove) (struct rt2x00_dev *rt2x00dev, - int wcid); + struct ieee80211_sta *sta); }; /* @@ -716,6 +718,8 @@ enum rt2x00_capability_flags { CAPABILITY_DOUBLE_ANTENNA, CAPABILITY_BT_COEXIST, CAPABILITY_VCO_RECALIBRATION, + CAPABILITY_INTERNAL_PA_TX0, + CAPABILITY_INTERNAL_PA_TX1, }; /* @@ -834,6 +838,10 @@ struct rt2x00_dev { struct mutex csr_mutex; /* + * Mutex to synchronize config and link tuner. + */ + struct mutex conf_mutex; + /* * Current packet filter configuration for the device. * This contains all currently active FIF_* flags send * to us by mac80211 during configure_filter(). @@ -1005,6 +1013,9 @@ struct rt2x00_dev { unsigned int extra_tx_headroom; struct usb_anchor *anchor; + + /* Clock for System On Chip devices. */ + struct clk *clk; }; struct rt2x00_bar_list_entry { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 6a1f508d472f..350507458ddc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c @@ -249,6 +249,22 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, */ rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags); + if (conf->flags & IEEE80211_CONF_PS) + set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + else + clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + + if (conf->flags & IEEE80211_CONF_MONITOR) + set_bit(CONFIG_MONITORING, &rt2x00dev->flags); + else + clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); + + rt2x00dev->curr_band = conf->chandef.chan->band; + rt2x00dev->curr_freq = conf->chandef.chan->center_freq; + rt2x00dev->tx_power = conf->power_level; + rt2x00dev->short_retry = conf->short_frame_max_tx_count; + rt2x00dev->long_retry = conf->long_frame_max_tx_count; + /* * Some configuration changes affect the link quality * which means we need to reset the link tuner. 
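The rt2800usb hunks above lean on the kernel's jiffies timeout idiom: a TX entry whose status URB has not arrived within 500 ms (raised from 100 ms) is now completed as TXDONE_FAILURE rather than TXDONE_UNKNOWN. A minimal, self-contained sketch of that idiom follows; the struct and field names are illustrative, not the driver's own:

#include <linux/jiffies.h>

struct tx_entry {
        unsigned long last_action;      /* jiffies stamp of last activity */
};

static bool tx_entry_timed_out(const struct tx_entry *entry)
{
        /* time_after() is safe across jiffies wraparound */
        return time_after(jiffies,
                          entry->last_action + msecs_to_jiffies(500));
}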
@@ -271,20 +287,4 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, &rt2x00dev->autowakeup_work, autowake_timeout - 15); } - - if (conf->flags & IEEE80211_CONF_PS) - set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); - else - clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); - - if (conf->flags & IEEE80211_CONF_MONITOR) - set_bit(CONFIG_MONITORING, &rt2x00dev->flags); - else - clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); - - rt2x00dev->curr_band = conf->chandef.chan->band; - rt2x00dev->curr_freq = conf->chandef.chan->center_freq; - rt2x00dev->tx_power = conf->power_level; - rt2x00dev->short_retry = conf->short_frame_max_tx_count; - rt2x00dev->long_retry = conf->long_frame_max_tx_count; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index eb7b71443657..8fcbc8dc94c1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -87,9 +87,6 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) */ rt2x00queue_start_queues(rt2x00dev); rt2x00link_start_tuner(rt2x00dev); - rt2x00link_start_agc(rt2x00dev); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - rt2x00link_start_vcocal(rt2x00dev); /* * Start watchdog monitoring. @@ -112,9 +109,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) /* * Stop all queues */ - rt2x00link_stop_agc(rt2x00dev); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - rt2x00link_stop_vcocal(rt2x00dev); rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); @@ -1319,6 +1313,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); + mutex_init(&rt2x00dev->conf_mutex); INIT_LIST_HEAD(&rt2x00dev->bar_list); spin_lock_init(&rt2x00dev->bar_list_lock); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h index fb7c349ccc9c..9ddc1681b86a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00lib.h @@ -29,9 +29,10 @@ * Interval defines */ #define WATCHDOG_INTERVAL round_jiffies_relative(HZ) -#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) -#define AGC_INTERVAL round_jiffies_relative(4 * HZ) -#define VCO_INTERVAL round_jiffies_relative(10 * HZ) /* 10 sec */ +#define LINK_TUNE_SECONDS 1 +#define LINK_TUNE_INTERVAL round_jiffies_relative(LINK_TUNE_SECONDS * HZ) +#define AGC_SECONDS 4 +#define VCO_SECONDS 10 /* * rt2x00_rate: Per rate device information @@ -271,30 +272,6 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev); void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev); /** - * rt2x00link_start_agc - Start periodic gain calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_start_vcocal - Start periodic VCO calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_stop_agc - Stop periodic gain calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. - */ -void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00link_stop_vcocal - Stop periodic VCO calibration - * @rt2x00dev: Pointer to &struct rt2x00_dev. 
- */ -void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev); - -/** * rt2x00link_register - Initialize link tuning & watchdog functionality * @rt2x00dev: Pointer to &struct rt2x00_dev. * diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c index 017188e5a736..2010a7715f21 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c @@ -233,15 +233,13 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) struct link *link = &rt2x00dev->link; /* - * Link tuning should only be performed when - * an active sta interface exists. AP interfaces - * don't need link tuning and monitor mode interfaces - * should never have to work with link tuners. + * Single monitor mode interfaces should never have + * work with link tuners. */ - if (!rt2x00dev->intf_sta_count) + if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) return; - /** + /* * While scanning, link tuning is disabled. By default * the most sensitive settings will be used to make sure * that all beacons and probe responses will be received @@ -308,22 +306,11 @@ static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev) qual->tx_failed = 0; } -static void rt2x00link_tuner(struct work_struct *work) +static void rt2x00link_tuner_sta(struct rt2x00_dev *rt2x00dev, struct link *link) { - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.work.work); - struct link *link = &rt2x00dev->link; struct link_qual *qual = &rt2x00dev->link.qual; /* - * When the radio is shutting down we should - * immediately cease all link tuning. - */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || - test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) - return; - - /* * Update statistics. */ rt2x00dev->ops->lib->link_stats(rt2x00dev, qual); @@ -360,6 +347,38 @@ static void rt2x00link_tuner(struct work_struct *work) */ if (rt2x00lib_antenna_diversity(rt2x00dev)) rt2x00link_reset_qual(rt2x00dev); +} + +static void rt2x00link_tuner(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.work.work); + struct link *link = &rt2x00dev->link; + + /* + * When the radio is shutting down we should + * immediately cease all link tuning. + */ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || + test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) + return; + + /* Do not race with rt2x00mac_config(). */ + mutex_lock(&rt2x00dev->conf_mutex); + + if (rt2x00dev->intf_sta_count) + rt2x00link_tuner_sta(rt2x00dev, link); + + if (rt2x00dev->ops->lib->gain_calibration && + (link->count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0) + rt2x00dev->ops->lib->gain_calibration(rt2x00dev); + + if (rt2x00dev->ops->lib->vco_calibration && + rt2x00_has_cap_vco_recalibration(rt2x00dev) && + (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0) + rt2x00dev->ops->lib->vco_calibration(rt2x00dev); + + mutex_unlock(&rt2x00dev->conf_mutex); /* * Increase tuner counter, and reschedule the next link tuner run. 
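The rt2x00link rework above folds the separate agc_work and vco_work items into the 1 Hz link tuner, running each slower job on every Nth tick via link->count modulo (AGC_SECONDS / LINK_TUNE_SECONDS) and (VCO_SECONDS / LINK_TUNE_SECONDS). A stripped-down sketch of the pattern, with a hypothetical device struct and stubbed-out calibration callbacks:

#include <linux/workqueue.h>
#include <linux/timer.h>

#define TUNE_SECONDS    1
#define AGC_SECONDS     4
#define VCO_SECONDS     10

struct my_dev {
        struct delayed_work tune_work;
        unsigned int count;
};

static void my_dev_run_agc(struct my_dev *dev) { /* stub */ }
static void my_dev_run_vco(struct my_dev *dev) { /* stub */ }

static void my_dev_tuner(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev,
                                          tune_work.work);

        /* slower periodic jobs piggyback on the 1 Hz tick */
        if (dev->count % (AGC_SECONDS / TUNE_SECONDS) == 0)
                my_dev_run_agc(dev);
        if (dev->count % (VCO_SECONDS / TUNE_SECONDS) == 0)
                my_dev_run_vco(dev);

        dev->count++;
        schedule_delayed_work(&dev->tune_work,
                              round_jiffies_relative(TUNE_SECONDS * HZ));
}

A side benefit of using one work item is that the conf_mutex introduced in rt2x00.h is enough to serialize all calibration against rt2x00mac_config(), instead of locking three independent workers.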
@@ -408,85 +427,8 @@ static void rt2x00link_watchdog(struct work_struct *work) WATCHDOG_INTERVAL); } -void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev) -{ - struct link *link = &rt2x00dev->link; - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && - rt2x00dev->ops->lib->gain_calibration) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->agc_work, - AGC_INTERVAL); -} - -void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev) -{ - struct link *link = &rt2x00dev->link; - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && - rt2x00dev->ops->lib->vco_calibration) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->vco_work, - VCO_INTERVAL); -} - -void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev) -{ - cancel_delayed_work_sync(&rt2x00dev->link.agc_work); -} - -void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev) -{ - cancel_delayed_work_sync(&rt2x00dev->link.vco_work); -} - -static void rt2x00link_agc(struct work_struct *work) -{ - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.agc_work.work); - struct link *link = &rt2x00dev->link; - - /* - * When the radio is shutting down we should - * immediately cease the watchdog monitoring. - */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return; - - rt2x00dev->ops->lib->gain_calibration(rt2x00dev); - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->agc_work, - AGC_INTERVAL); -} - -static void rt2x00link_vcocal(struct work_struct *work) -{ - struct rt2x00_dev *rt2x00dev = - container_of(work, struct rt2x00_dev, link.vco_work.work); - struct link *link = &rt2x00dev->link; - - /* - * When the radio is shutting down we should - * immediately cease the VCO calibration. - */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return; - - rt2x00dev->ops->lib->vco_calibration(rt2x00dev); - - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - ieee80211_queue_delayed_work(rt2x00dev->hw, - &link->vco_work, - VCO_INTERVAL); -} - void rt2x00link_register(struct rt2x00_dev *rt2x00dev) { - INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc); - if (rt2x00_has_cap_vco_recalibration(rt2x00dev)) - INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal); INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog); INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 13da95a24cf7..ecc96312a370 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -320,6 +320,9 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) */ rt2x00queue_stop_queue(rt2x00dev->rx); + /* Do not race with with link tuner. 
*/ + mutex_lock(&rt2x00dev->conf_mutex); + /* * When we've just turned on the radio, we want to reprogram * everything to ensure a consistent state @@ -335,6 +338,8 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) */ rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); + mutex_unlock(&rt2x00dev->conf_mutex); + /* Turn RX back on */ rt2x00queue_start_queue(rt2x00dev->rx); @@ -539,9 +544,8 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rt2x00_dev *rt2x00dev = hw->priv; - struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); - return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid); + return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta); } EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove); @@ -739,7 +743,8 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; tx_queue_for_each(rt2x00dev, queue) - rt2x00queue_flush_queue(queue, drop); + if (!rt2x00queue_empty(queue)) + rt2x00queue_flush_queue(queue, drop); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c index f0178fd4fe5f..da38d254c26f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c @@ -101,7 +101,7 @@ void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) unsigned int i; for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) - msleep(10); + msleep(50); } EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 68b620b2462f..b2364d378774 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -306,13 +306,12 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_sta *sta_priv = NULL; + u8 density = 0; if (sta) { - txdesc->u.ht.mpdu_density = - sta->ht_cap.ampdu_density; - sta_priv = sta_to_rt2x00_sta(sta); txdesc->u.ht.wcid = sta_priv->wcid; + density = sta->ht_cap.ampdu_density; } /* @@ -345,8 +344,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, return; } - txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ - /* * Only one STBC stream is supported for now. */ @@ -358,8 +355,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, * frames that are intended to probe a specific tx rate. */ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && - !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) + !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); + txdesc->u.ht.mpdu_density = density; + txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? 
*/ + } /* * Set 40Mhz mode if necessary (for legacy rates this will diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index 69a0cdadb07f..29250f79c4a4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -93,6 +93,10 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) rt2x00dev->irq = platform_get_irq(pdev, 0); rt2x00dev->name = pdev->dev.driver->name; + rt2x00dev->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(rt2x00dev->clk)) + rt2x00dev->clk = NULL; + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); retval = rt2x00soc_alloc_reg(rt2x00dev); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 6005e14213ca..838ca58d2dd6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -517,7 +517,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop) * Wait for a little while to give the driver * the oppurtunity to recover itself. */ - msleep(10); + msleep(50); } } EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index df551b2b56eb..95e3993d8a33 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c index f9e2050812ab..a41a29612582 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index a1178c5d6ad8..80fee699f58a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8192e specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
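The rt2x00soc hunk above treats the SoC clock as optional: clk_get() may fail on boards without a clock node, and the error pointer is simply replaced with NULL. The rt2800_clk_is_20mhz() helper used in the channel-table hunks earlier presumably reads the rate along these lines; this is a hedged sketch under that assumption, not the driver's actual body:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *soc_clk_get_optional(struct device *dev)
{
        struct clk *clk = clk_get(dev, NULL);

        return IS_ERR(clk) ? NULL : clk;        /* a missing clock is not fatal */
}

static bool soc_clk_is_20mhz(struct clk *clk)
{
        /* NULL means "unknown"; fall back to the 40 MHz register tables */
        return clk && clk_get_rate(clk) == 20000000;
}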
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c index aef373028155..174631132b96 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8723a specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 02b8ddd98a95..c4b86a84a721 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver - 8723b specific subdriver * - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 3a86675020a2..e544dd1d618c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1,7 +1,7 @@ /* * RTL8XXXU mac80211 USB driver * - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
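For context on the rtl8xxxu device-table hunks just below: each USB binding matches vendor/product plus the vendor-specific interface class (0xff, 0xff, 0xff), and driver_info carries a pointer to the chip-specific fileops. A short sketch using one of the IDs added below (TP-Link TL-WN822N v4); rtl8192eu_fops is assumed visible as in rtl8xxxu_core.c:

#include <linux/usb.h>

static const struct usb_device_id example_dev_table[] = {
        /* TP-Link TL-WN822N v4, bound to the 8192eu ops */
        {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff),
         .driver_info = (unsigned long)&rtl8192eu_fops},
        { }
};
MODULE_DEVICE_TABLE(usb, example_dev_table);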
@@ -48,7 +48,7 @@ static bool rtl8xxxu_dma_aggregation; static int rtl8xxxu_dma_agg_timeout = -1; static int rtl8xxxu_dma_agg_pages = -1; -MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); +MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>"); MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin"); @@ -6000,6 +6000,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, case 0x8176: case 0x8178: case 0x817f: + case 0x818b: untested = 0; break; } @@ -6196,6 +6197,12 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, +/* TP-Link TL-WN822N v4 */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +/* D-Link DWA-131 rev E1, tested by David Patiño */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3319, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, /* Tested by Myckel Habets */ {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, @@ -6347,6 +6354,13 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, +/* found in rtl8192eu vendor driver */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0107, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab33, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, #endif { } }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 315ccfb2dff5..3d3e2e1ada6f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com> + * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 4ac928bf1f8e..01cf0a9aa31b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -207,8 +207,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, *highest supported RX rate */ if (rtlpriv->dm.supp_phymode_switch) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Support phy mode switch\n"); + pr_info("Support phy mode switch\n"); ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; @@ -389,8 +388,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n", - rtlhal->current_bandtype); + pr_err("Err BAND %d\n", + rtlhal->current_bandtype); } } /* <5> set hw caps */ @@ -544,7 +543,7 @@ int rtl_init_core(struct ieee80211_hw *hw) * mac80211 hw in _rtl_init_mac80211. 
*/ if (rtl_regd_init(hw, rtl_reg_notifier)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n"); + pr_err("REGD init failed\n"); return 1; } @@ -1694,8 +1693,7 @@ void rtl_watchdog_wq_callback(void *data) * we should reconnect this AP */ if (rtlpriv->link_info.roam_times >= 5) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "AP off, try to reconnect now\n"); + pr_err("AP off, try to reconnect now\n"); rtlpriv->link_info.roam_times = 0; ieee80211_connection_loss( rtlpriv->mac80211.vif); @@ -1886,8 +1884,7 @@ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -2086,65 +2083,6 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len) } EXPORT_SYMBOL_GPL(rtl_recognize_peer); -/********************************************************* - * - * sysfs functions - * - *********************************************************/ -static ssize_t rtl_show_debug_level(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct ieee80211_hw *hw = dev_get_drvdata(d); - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel); -} - -static ssize_t rtl_store_debug_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ieee80211_hw *hw = dev_get_drvdata(d); - struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long val; - int ret; - - ret = kstrtoul(buf, 0, &val); - if (ret) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, - "%s is not in hex or decimal form.\n", buf); - } else { - rtlpriv->dbg.global_debuglevel = val; - RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, - "debuglevel:%x\n", - rtlpriv->dbg.global_debuglevel); - } - - return strnlen(buf, count); -} - -static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, - rtl_show_debug_level, rtl_store_debug_level); - -static struct attribute *rtl_sysfs_entries[] = { - - &dev_attr_debug_level.attr, - - NULL -}; - -/* - * "name" is folder name witch will be - * put in device directory like : - * sys/devices/pci0000:00/0000:00:1c.4/ - * 0000:06:00.0/rtl_sysfs - */ -struct attribute_group rtl_attribute_group = { - .name = "rtlsysfs", - .attrs = rtl_sysfs_entries, -}; -EXPORT_SYMBOL_GPL(rtl_attribute_group); - MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 74233d601a90..6c770aecebe7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -148,7 +148,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw, u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); u8 rtl_tid_to_ac(u8 tid); -extern struct attribute_group rtl_attribute_group; void rtl_easy_concurrent_retrytimer_callback(unsigned long data); extern struct rtl_global_var rtl_global_var; void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation); diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index 8fe8b4cfae6c..a0605d8e9970 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c @@ -285,8 +285,7 @@ u8 
rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is NULL.\n"); + pr_err("sta_addr is NULL.\n"); return TOTAL_CAM_ENTRY; } /* Does STA already exist? */ @@ -298,9 +297,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) /* Get a free CAM entry. */ for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) { if ((bitmap & BIT(0)) == 0) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n", - rtlpriv->sec.hwsec_cam_bitmap, entry_idx); + pr_err("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n", + rtlpriv->sec.hwsec_cam_bitmap, entry_idx); rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx; memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx], sta_addr, ETH_ALEN); @@ -319,14 +317,12 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is NULL.\n"); + pr_err("sta_addr is NULL.\n"); return; } if (is_zero_ether_addr(sta_addr)) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "sta_addr is %pM\n", sta_addr); + pr_err("sta_addr is %pM\n", sta_addr); return; } /* Does STA already exist? */ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index ded1493fee9c..179a699cc6ac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -117,8 +117,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, } found_alt: if (firmware->size > rtlpriv->max_fw_size) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is too big!\n"); + pr_err("Firmware is too big!\n"); release_firmware(firmware); return; } @@ -303,8 +302,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, (u8 *)(&mac->basic_rates)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "operation mode %d is not support!\n", vif->type); + pr_err("operation mode %d is not supported!\n", + vif->type); err = -EOPNOTSUPP; goto out; } @@ -764,9 +763,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) default: mac->bw_40 = false; mac->bw_80 = false; - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - channel_type); + pr_err("switch case %#x not processed\n", + channel_type); break; } } @@ -1399,8 +1397,7 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw, "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid); return rtl_rx_agg_stop(hw, sta, tid); default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "IEEE80211_AMPDU_ERR!!!!:\n"); + pr_err("IEEE80211_AMPDU_ERR!!!!:\n"); return -EOPNOTSUPP; } return 0; @@ -1532,12 +1529,11 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_type = AESCMAC_ENCRYPTION; RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n"); RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, - "HW don't support CMAC encrypiton, use software CMAC encrypiton\n"); + "HW don't support CMAC encryption, use software CMAC encryption\n"); err = -EOPNOTSUPP; goto out_unlock; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "alg_err:%x!!!!:\n", key->cipher); + pr_err("alg_err:%x!!!!:\n", key->cipher); goto out_unlock; } if (key_type == WEP40_ENCRYPTION || @@ -1613,8 +1609,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set pairwise key\n"); if (!sta) { - RT_ASSERT(false, - "pairwise key without mac_addr\n"); + WARN_ONCE(true, + "rtlwifi: 
pairwise key without mac_addr\n"); err = -EOPNOTSUPP; goto out_unlock; @@ -1662,8 +1658,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, rtl_cam_delete_one_entry(hw, mac_addr, key_idx); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "cmd_err:%x!!!!:\n", cmd); + pr_err("cmd_err:%x!!!!:\n", cmd); } out_unlock: mutex_unlock(&rtlpriv->locks.conf_mutex); @@ -1804,8 +1799,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); return true; default: - RT_ASSERT(false, - "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); + WARN_ONCE(true, + "rtlwifi: rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index 33905bbacad2..7ecac6116d5d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -26,35 +26,32 @@ #include <linux/moduleparam.h> -void rtl_dbgp_flag_init(struct ieee80211_hw *hw) +#ifdef CONFIG_RTLWIFI_DEBUG +void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, + const char *fmt, ...) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 i; + if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && + (level <= rtlpriv->cfg->mod_params->debug_level))) { + struct va_format vaf; + va_list args; - rtlpriv->dbg.global_debugcomponents = - COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND | - COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC | - COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC | - COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS | - COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD | - COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN | - COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 | - COMP_REGD | COMP_CHAN | COMP_BT_COEXIST; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; - for (i = 0; i < DBGP_TYPE_MAX; i++) - rtlpriv->dbg.dbgp_type[i] = 0; + pr_info(":<%lx> %pV", in_interrupt(), &vaf); - /*Init Debug flag enable condition */ + va_end(args); + } } -EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init); +EXPORT_SYMBOL_GPL(_rtl_dbg_trace); -#ifdef CONFIG_RTLWIFI_DEBUG -void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, - const char *modname, const char *fmt, ...) +void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *fmt, ...) 
{ - if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && - (level <= rtlpriv->dbg.global_debuglevel))) { + if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && + (level <= rtlpriv->cfg->mod_params->debug_level))) { struct va_format vaf; va_list args; @@ -63,13 +60,25 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, vaf.fmt = fmt; vaf.va = &args; - printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV", - modname, __builtin_return_address(0), - in_interrupt(), in_atomic(), - &vaf); + pr_info("%pV", &vaf); va_end(args); } } -EXPORT_SYMBOL_GPL(_rtl_dbg_trace); +EXPORT_SYMBOL_GPL(_rtl_dbg_print); + +void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *titlestring, + const void *hexdata, int hexdatalen) +{ + if (unlikely(((comp) & rtlpriv->cfg->mod_params->debug_mask) && + ((level) <= rtlpriv->cfg->mod_params->debug_level))) { + pr_info("In process \"%s\" (pid %i): %s\n", + current->comm, current->pid, titlestring); + print_hex_dump_bytes("", DUMP_PREFIX_NONE, + hexdata, hexdatalen); + } +} +EXPORT_SYMBOL_GPL(_rtl_dbg_print_data); + #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index 6156a79328c1..bf5339f1c1bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -36,7 +36,7 @@ *unexpected HW behavior, HW BUG *and so on. */ -#define DBG_EMERG 0 +/*#define DBG_EMERG 0 */ /* *Abnormal, rare, or unexpeted cases. @@ -166,55 +166,36 @@ enum dbgp_flag_e { #ifdef CONFIG_RTLWIFI_DEBUG -#define RT_ASSERT(_exp, fmt, ...) \ -do { \ - if (!(_exp)) { \ - printk(KERN_DEBUG KBUILD_MODNAME ":%s(): " fmt, \ - __func__, ##__VA_ARGS__); \ - } \ -} while (0) - - struct rtl_priv; -__printf(5, 6) +__printf(4, 5) void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, - const char *modname, const char *fmt, ...); + const char *fmt, ...); + +__printf(4, 5) +void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *fmt, ...); + +void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, + const char *titlestring, + const void *hexdata, int hexdatalen); #define RT_TRACE(rtlpriv, comp, level, fmt, ...) \ _rtl_dbg_trace(rtlpriv, comp, level, \ - KBUILD_MODNAME, fmt, ##__VA_ARGS__) + fmt, ##__VA_ARGS__) #define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \ -do { \ - if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \ - printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \ - ##__VA_ARGS__); \ - } \ -} while (0) + _rtl_dbg_print(rtlpriv, dbgtype, dbgflag, fmt, ##__VA_ARGS__) #define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \ _hexdatalen) \ -do { \ - if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \ - (_level <= rtlpriv->dbg.global_debuglevel))) { \ - printk(KERN_DEBUG "%s: In process \"%s\" (pid %i): %s\n", \ - KBUILD_MODNAME, current->comm, current->pid, \ - _titlestring); \ - print_hex_dump_bytes("", DUMP_PREFIX_NONE, \ - _hexdata, _hexdatalen); \ - } \ -} while (0) + _rtl_dbg_print_data(rtlpriv, _comp, _level, \ + _titlestring, _hexdata, _hexdatalen) #else struct rtl_priv; -__printf(2, 3) -static inline void RT_ASSERT(int exp, const char *fmt, ...) 
-{ -} - __printf(4, 5) static inline void RT_TRACE(struct rtl_priv *rtlpriv, int comp, int level, @@ -237,6 +218,4 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv, } #endif - -void rtl_dbgp_flag_init(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 7becfef6cd5c..eb58633e674a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -31,6 +31,9 @@ static const u8 MAX_PGPKT_SIZE = 9; static const u8 PGPKT_DATA_SIZE = 8; static const int EFUSE_MAX_SIZE = 512; +#define START_ADDRESS 0x1000 +#define REG_MCUFWDL 0x0080 + static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = { {0, 0, 0, 2}, {0, 1, 0, 2}, @@ -1259,8 +1262,7 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, break; case EEPROM_93C46: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL8XXX did not boot from eeprom, check it !!\n"); + pr_err("RTL8XXX did not boot from eeprom, check it !!\n"); return 1; default: @@ -1321,3 +1323,45 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, return 0; } EXPORT_SYMBOL_GPL(rtl_get_hwinfo); + +void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *pu4byteptr = (u8 *)buffer; + u32 i; + + for (i = 0; i < size; i++) + rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i)); +} +EXPORT_SYMBOL_GPL(rtl_fw_block_write); + +void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, + u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value8; + u8 u8page = (u8)(page & 0x07); + + value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; + + rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); + rtl_fw_block_write(hw, buffer, size); +} +EXPORT_SYMBOL_GPL(rtl_fw_page_write); + +void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +{ + u32 fwlen = *pfwlen; + u8 remain = (u8)(fwlen % 4); + + remain = (remain == 0) ? 
0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + + *pfwlen = fwlen; +} +EXPORT_SYMBOL_GPL(rtl_fill_dummy); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index 51aa1210def5..1338ae63fe54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -111,5 +111,9 @@ void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, int max_size, u8 *hwinfo, int *params); +void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen); +void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, + u32 size); +void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 8bfe020edd3a..b402f438b1af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -174,9 +174,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlpci->const_support_pciaspm); + pr_err("switch case %#x not processed\n", + rtlpci->const_support_pciaspm); break; } @@ -1247,9 +1246,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, &buffer_desc_dma); if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate TX ring (prio = %d)\n", - prio); + pr_err("Cannot allocate TX ring (prio = %d)\n", + prio); return -ENOMEM; } @@ -1266,8 +1264,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, sizeof(*desc) * entries, &desc_dma); if (!desc || (unsigned long)desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate TX ring (prio = %d)\n", prio); + pr_err("Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } @@ -1314,8 +1311,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].buffer_desc || (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate RX ring\n"); + pr_err("Cannot allocate RX ring\n"); return -ENOMEM; } @@ -1338,8 +1334,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].desc || (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Cannot allocate RX ring\n"); + pr_err("Cannot allocate RX ring\n"); return -ENOMEM; } @@ -1799,15 +1794,13 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) { - struct rtl_priv *rtlpriv = rtl_priv(hw); int err; _rtl_pci_init_struct(hw, pdev); err = _rtl_pci_init_trx_ring(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "tx ring initialization failed\n"); + pr_err("tx ring initialization failed\n"); return err; } @@ -2174,15 +2167,15 @@ int rtl_pci_probe(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { - RT_ASSERT(false, "%s : Cannot enable new PCI device\n", + WARN_ONCE(true, "%s : Cannot enable new PCI device\n", pci_name(pdev)); return err; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { if (pci_set_consistent_dma_mask(pdev, 
DMA_BIT_MASK(32))) { - RT_ASSERT(false, - "Unable to obtain 32bit DMA for consistent allocations\n"); + WARN_ONCE(true, + "rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n"); err = -ENOMEM; goto fail1; } @@ -2193,7 +2186,7 @@ int rtl_pci_probe(struct pci_dev *pdev, hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) + sizeof(struct rtl_priv), &rtl_ops); if (!hw) { - RT_ASSERT(false, + WARN_ONCE(true, "%s : ieee80211 alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto fail1; @@ -2219,20 +2212,10 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->intf_ops = &rtl_pci_ops; rtlpriv->glb_var = &rtl_global_var; - /* - *init dbgp flags before all - *other functions, because we will - *use it in other funtions like - *RT_TRACE/RT_PRINT/RTL_PRINT_DATA - *you can not use these macro - *before this - */ - rtl_dbgp_flag_init(hw); - /* MEM map */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { - RT_ASSERT(false, "Can't obtain PCI resources\n"); + WARN_ONCE(true, "rtlwifi: Can't obtain PCI resources\n"); goto fail1; } @@ -2245,7 +2228,7 @@ int rtl_pci_probe(struct pci_dev *pdev, (unsigned long)pci_iomap(pdev, rtlpriv->cfg->bar_id, pmem_len); if (rtlpriv->io.pci_mem_start == 0) { - RT_ASSERT(false, "Can't map PCI mem\n"); + WARN_ONCE(true, "rtlwifi: Can't map PCI mem\n"); err = -ENOMEM; goto fail2; } @@ -2275,7 +2258,7 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->cfg->ops->read_eeprom_info(hw); if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + pr_err("Can't init_sw_vars\n"); err = -ENODEV; goto fail3; } @@ -2287,34 +2270,25 @@ int rtl_pci_probe(struct pci_dev *pdev, /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate sw for mac80211\n"); + pr_err("Can't allocate sw for mac80211\n"); goto fail3; } /* Init PCI sw */ err = rtl_pci_init(hw, pdev); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n"); + pr_err("Failed to init PCI\n"); goto fail3; } err = ieee80211_register_hw(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw.\n"); + pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; goto fail3; } rtlpriv->mac80211.mac80211_registered = 1; - err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "failed to create sysfs device attributes\n"); - goto fail3; - } - /*init rfkill */ rtl_init_rfkill(hw); /* Init PCI sw */ @@ -2364,8 +2338,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev) wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); - sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group); - /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index d0ffc4d508cf..4f8327097086 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -150,8 +150,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", state_toset); + pr_err("switch case %#x not processed\n", state_toset); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index ce8621a0f7aa..951d257cd4c0 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -267,8 +267,7 @@ static void *rtl_rate_alloc_sta(void *ppriv, rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp); if (!rate_priv) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unable to allocate private rc structure\n"); + pr_err("Unable to allocate private rc structure\n"); return NULL; } diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 6ee6bf8e7eaf..558c31bf5c80 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -440,7 +440,7 @@ int rtl_regd_init(struct ieee80211_hw *hw, if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, - "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n"); + "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n"); rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 5360d5332359..21ed9ad3be7a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -53,63 +54,6 @@ static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl88e_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4BytePtr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl88e_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl88e_fw_block_write(hw, buffer, size); -} - -static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 
0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl88e_write_fw(struct ieee80211_hw *hw, enum version_8188e version, u8 *buffer, u32 size) { @@ -120,27 +64,24 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); - _rtl88e_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + if (pagenums > 8) + pr_err("Page numbers should not be greater than 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl88e_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remainsize) { offset = pagenums * FW_8192C_PAGE_SIZE; page = pagenums; - _rtl88e_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } @@ -157,15 +98,10 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -176,20 +112,15 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x.\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8192C_POLLING_DELAY); } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32); + pr_err("Polling FW ready fail!!
REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; @@ -234,13 +165,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, _rtl88e_enable_fw_download(hw, false); err = _rtl88e_fw_free_to_go(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "Firmware is ready to run!\n"); - } + if (err) + pr_err("Firmware is not ready to run!\n"); return 0; } @@ -309,8 +235,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, while (!write_sucess) { wait_writeh2c_limit--; if (wait_writeh2c_limit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -434,8 +359,8 @@ void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8188ee: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 37d6efc3d240..679e214415d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -358,8 +358,7 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -572,9 +571,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VOQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -737,8 +735,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) 2, array); break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -759,9 +756,8 @@ static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1096,7 +1092,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) rtstatus = _rtl88ee_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_info("Init MAC failed\n"); err = 1; goto exit; } @@ -1252,8 +1248,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not supported!\n", type); return 1; break; } @@ -1352,7 +1347,7 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8188ee: invalid aci: %d !\n", aci); break; } } @@ -1987,7 +1982,7 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl88ee_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload
ERR!!\n"); } _rtl88ee_hal_customized_behavior(hw); } @@ -2354,8 +2349,8 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2373,9 +2368,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index fffaa92eda81..14a256062614 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -176,7 +176,7 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -220,7 +220,7 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -383,13 +383,13 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = @@ -1095,8 +1095,7 @@ void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1137,8 +1136,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1162,8 +1161,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1231,8 +1230,8 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8188ee: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1280,8 +1279,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8188ee: illegal channel for Zebra: %d\n", channel); _rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -1303,8 +1302,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } @@ -1367,7 +1366,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8188ee: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c index 26ac4c2903c7..30798b12a363 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c @@ -51,8 +51,7 @@ void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index f361808def47..7661cfa53032 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -131,8 +131,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0); rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -165,8 +163,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_info("Can't alloc buffer for fw.\n"); return 1; } @@ -177,8 +174,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_info("Failed to request firmware!\n"); return 1; } @@ -278,7 +274,8 @@ static struct rtl_mod_params rtl88ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = false, 
.msi_support = true, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl88ee_hal_cfg = { @@ -394,7 +391,8 @@ MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin"); module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl88ee_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl88ee_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl88ee_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); @@ -406,7 +404,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 3e3b88664883..09c908d4cf91 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -760,7 +760,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", desc_name); break; } @@ -779,7 +779,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -799,7 +799,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n", desc_name); break; } @@ -815,7 +815,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 7d152466152b..c7a77467b20e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "../rtl8192ce/reg.h" #include "../rtl8192ce/def.h" #include "fw_common.h" @@ -68,63 +69,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 
*bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl92c_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl92c_fw_block_write(hw, buffer, size); -} - -static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl92c_write_fw(struct ieee80211_hw *hw, enum version_8192c version, u8 *buffer, u32 size) { @@ -140,30 +84,28 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, u32 page, offset; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) - _rtl92c_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pageNums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pageNums > 4) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 4\n"); - } + if (pageNums > 4) + pr_err("Page numbers should not be greater than 4\n"); for (page = 0; page < pageNums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl92c_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remainsize) { offset = pageNums * FW_8192C_PAGE_SIZE; page = pageNums; - _rtl92c_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), + remainsize); } } else { - _rtl92c_fw_block_write(hw, buffer, size); + rtl_fw_block_write(hw, buffer, size); } } @@ -180,15 +122,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -198,20 +135,15 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; mdelay(FW_8192C_POLLING_DELAY); } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32); + pr_err("Polling FW ready fail!
REG_MCUFWDL:0x%08x.\n", + value32); exit: return err; @@ -250,13 +182,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) _rtl92c_enable_fw_download(hw, false); err = _rtl92c_fw_free_to_go(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Firmware is ready to run!\n"); - } + if (err) + pr_err("Firmware is not ready to run!\n"); return 0; } @@ -327,8 +254,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -485,8 +411,8 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8192c-common: return H2C cmd because of Fw download fail!!!\n"); return; } @@ -510,7 +436,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) { - RT_ASSERT(false, "8051 reset fail.\n"); + WARN_ONCE(true, "rtl8192c-common: 8051 reset fail.\n"); break; } udelay(50); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 94dd25cf1ca8..7c6e5d91439d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -77,7 +77,7 @@ EXPORT_SYMBOL(rtl92c_phy_set_bb_reg); u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_read deprecated!\n"); return 0; } EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read); @@ -86,7 +86,7 @@ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_write deprecated!\n"); } EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write); @@ -104,7 +104,7 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0x3f; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -152,7 +152,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0x3f; @@ -209,7 +209,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } if (rtlphy->rf_type == RF_1T2R) { @@ -222,13 +222,13 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table 
Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = @@ -745,8 +745,8 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8192c-common: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -792,7 +792,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8192c-common: cmdtable cannot be NULL.\n"); return false; } @@ -837,8 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192c-common: illegal channel for Zebra: %d\n", channel); _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -860,8 +860,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 4483d40ecad1..611987dfc207 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -140,8 +140,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -364,9 +363,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_VoqEn); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -551,8 +549,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array); break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %d not processed\n", variable); + pr_err("switch case %d not processed\n", variable); break; } } @@ -573,9 +570,8 @@ static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -963,7 +959,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl92ce_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1128,8 +1124,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) break; } - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Chip Version ID: %s\n", versionid); + pr_info("Chip Version ID: %s\n", versionid); switch (version & 0x3) { case CHIP_88C: @@ -1143,8 +1138,7 @@ static enum 
version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) break; default: rtlphy->rf_type = RF_1T1R; - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "ERROR RF_Type is set!!\n"); + pr_err("ERROR RF_Type is set!!\n"); break; } @@ -1193,8 +1187,7 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw, "Set Network type to Mesh Point!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); return 1; } @@ -1292,7 +1285,7 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192ce: invalid aci: %d !\n", aci); break; } } @@ -1780,7 +1773,7 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92ce_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl92ce_hal_customized_behavior(hw); } @@ -2152,8 +2145,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2171,9 +2164,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index 833193b751f7..bdaa848995ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c @@ -57,8 +57,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -92,8 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_info("switch case %#x not processed\n", pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index d1b6a8fe7b6a..7c6d7fc1ef9a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c @@ -297,10 +297,10 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_info("Incorrect rfpath %#x\n", rfpath); break; default: + pr_info("switch case %#x not processed\n", rfpath); break; } return true; @@ -340,8 +340,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", 
rtlphy->current_chan_bw); + pr_info("unknown bandwidth: %#X\n", rtlphy->current_chan_bw); break; } @@ -365,8 +364,8 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -546,8 +545,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c index 7cae6350437c..e68ed7f37c79 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c @@ -51,8 +51,7 @@ void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index a33a06d58a9a..bcbb0c60f1f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -130,8 +130,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -158,8 +156,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -178,8 +175,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -254,7 +250,8 @@ static struct rtl_mod_params rtl92ce_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl92ce_hal_cfg = { @@ -371,7 +368,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin"); module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92ce_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92ce_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444); @@ -379,7 +377,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); 
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 2ab4a00246cc..3616ba21959d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -670,7 +670,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n", desc_name); break; } @@ -690,7 +690,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -710,7 +710,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n", desc_name); break; } @@ -726,7 +726,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(p_desc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 5c7da0cfc684..9db6ec62787a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -452,8 +452,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) break; } if (pollingCount++ > 100) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); return -ENODEV; } } while (true); @@ -486,8 +485,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) break; } if (pollingCount++ > 1000) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); return -ENODEV; } } while (true); @@ -687,7 +685,6 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, u8 queue_sel) { u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; - struct rtl_priv *rtlpriv = rtl_priv(hw); if (!wmm_enable) { /* typical setting */ beQ = QUEUE_LOW; @@ -705,8 +702,7 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, hiQ = QUEUE_HIGH; } _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n", - queue_sel); + pr_info("Tx queue select :0x%02x..\n", queue_sel); } static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw, @@ -765,8 +761,7 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw, break; } rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele); - RT_TRACE(rtlpriv, COMP_INIT, 
DBG_EMERG, "Tx queue select :0x%02x..\n", - hq_sele); + pr_info("Tx queue select :0x%02x..\n", hq_sele); } static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, @@ -848,8 +843,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) err = _rtl92cu_init_power_on(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to init power on!\n"); + pr_err("Failed to init power on!\n"); return err; } if (!wmm_enable) { @@ -860,8 +854,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) : WMM_CHIP_A_TX_PAGE_BOUNDARY; } if (false == rtl92c_init_llt_table(hw, boundary)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to init LLT Table!\n"); + pr_err("Failed to init LLT Table!\n"); return -EINVAL; } _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums, @@ -986,7 +979,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; err = _rtl92cu_init_mac(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); + pr_err("init mac failed!\n"); goto exit; } err = rtl92c_download_fw(hw); @@ -1099,8 +1092,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) udelay(50); } if (retry_cnts >= 100) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "#####=> 8051 reset failed!.........................\n"); + pr_err("8051 reset failed!.........................\n"); /* if 8051 reset fail, reset MAC. */ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, @@ -1340,8 +1332,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); goto error_out; } rtl_write_byte(rtlpriv, MSR, bt_msr); @@ -1555,8 +1546,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -1790,7 +1780,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u4b_ac_param); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", + WARN_ONCE(true, "rtl8192cu: invalid aci: %d !\n", e_aci); break; } @@ -1926,8 +1916,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index c6240813ff7b..70ea6c5692a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c @@ -57,8 +57,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -90,8 +90,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index cf212f694db5..1b124eade846 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -157,9 +157,8 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n", - address, _LLT_OP_VALUE(value)); + pr_err("Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n", + address, _LLT_OP_VALUE(value)); status = false; break; } @@ -262,8 +261,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "illegal switch case\n"); + pr_err("illegal switch case\n"); enc_algo = CAM_TKIP; break; } @@ -280,9 +278,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index f35f435c094e..f068dd5317a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c @@ -274,8 +274,7 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; default: break; @@ -314,8 +313,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } switch (rtlphy->current_chan_bw) { @@ -336,8 +335,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -509,8 +508,8 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92c_phy_set_rf_sleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 5e3183024aa0..9cff6bc4049c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -51,8 +51,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index b84e13ac6ead..96c923b3feb4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -61,15 +61,13 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && @@ -158,13 +156,16 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { static struct rtl_mod_params rtl92cu_mod_params = { .sw_crypto = 0, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, }; module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92cu_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92cu_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92cu_mod_params.debug_mask, ullong, 0644); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { /* rx */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 1ea878fa7901..1611e42479d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -241,7 +241,7 @@ u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index) break; default: hw_queue_index = RTL_TXQ_BE; - RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n", + WARN_ONCE(true, "rtl8192cu: QSLT_BE queue, skb_queue:%d\n", mac80211_queue_index); break; } @@ -477,14 +477,14 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc) */ static void _rtl_tx_desc_checksum(u8 *txdesc) { - u16 *ptr = (u16 *)txdesc; + __le16 *ptr = (__le16 
*)txdesc; u16 checksum = 0; u32 index; /* Clear first */ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); for (index = 0; index < 16; index++) - checksum = checksum ^ (*(ptr + index)); + checksum = checksum ^ le16_to_cpu(*(ptr + index)); SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index df88e39301c2..487eec89bc29 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -92,129 +92,107 @@ struct rx_drv_info_92c { u8 reserve:4; } __packed; -/* Define a macro that takes a le32 word, converts it to host ordering, - * right shifts by a specified count, creates a mask of the specified - * bit count, and extracts that number of bits. - */ - -#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \ - ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \ - BIT_LEN_MASK_32(__bits)) - -/* Define a macro that clears a bit field in an le32 word and - * sets the specified value into that bit field. The resulting - * value remains in le32 ordering; however, it is properly converted - * to host ordering for the clear and set operations before conversion - * back to le32. - */ - -#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \ - (*(__le32 *)(__pdesc) = \ - (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \ - (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \ - (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))); - /* macros to read various fields in RX descriptor */ /* DWORD 0 */ #define GET_RX_DESC_PKT_LEN(__rxdesc) \ - SHIFT_AND_MASK_LE((__rxdesc), 0, 14) + LE_BITS_TO_4BYTE((__rxdesc), 0, 14) #define GET_RX_DESC_CRC32(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc, 14, 1) #define GET_RX_DESC_ICV(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc, 15, 1) #define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 16, 4) + LE_BITS_TO_4BYTE(__rxdesc, 16, 4) #define GET_RX_DESC_SECURITY(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 20, 3) + LE_BITS_TO_4BYTE(__rxdesc, 20, 3) #define GET_RX_DESC_QOS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 23, 1) + LE_BITS_TO_4BYTE(__rxdesc, 23, 1) #define GET_RX_DESC_SHIFT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 24, 2) + LE_BITS_TO_4BYTE(__rxdesc, 24, 2) #define GET_RX_DESC_PHY_STATUS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 26, 1) + LE_BITS_TO_4BYTE(__rxdesc, 26, 1) #define GET_RX_DESC_SWDEC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 27, 1) + LE_BITS_TO_4BYTE(__rxdesc, 27, 1) #define GET_RX_DESC_LAST_SEG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 28, 1) + LE_BITS_TO_4BYTE(__rxdesc, 28, 1) #define GET_RX_DESC_FIRST_SEG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 29, 1) + LE_BITS_TO_4BYTE(__rxdesc, 29, 1) #define GET_RX_DESC_EOR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc, 30, 1) #define GET_RX_DESC_OWN(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc, 31, 1) + LE_BITS_TO_4BYTE(__rxdesc, 31, 1) /* DWORD 1 */ #define GET_RX_DESC_MACID(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5) + LE_BITS_TO_4BYTE(__rxdesc + 4, 0, 5) #define GET_RX_DESC_TID(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 5, 4) #define GET_RX_DESC_PAGGR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 14, 1) #define GET_RX_DESC_FAGGR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 15, 1) #define 
GET_RX_DESC_A1_FIT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 16, 4) #define GET_RX_DESC_A2_FIT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4) + LE_BITS_TO_4BYTE(__rxdesc + 4, 20, 4) #define GET_RX_DESC_PAM(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 24, 1) #define GET_RX_DESC_PWR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 25, 1) #define GET_RX_DESC_MORE_DATA(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 26, 1) #define GET_RX_DESC_MORE_FRAG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 27, 1) #define GET_RX_DESC_TYPE(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2) + LE_BITS_TO_4BYTE(__rxdesc + 4, 28, 2) #define GET_RX_DESC_MC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 30, 1) #define GET_RX_DESC_BC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1) + LE_BITS_TO_4BYTE(__rxdesc + 4, 31, 1) /* DWORD 2 */ #define GET_RX_DESC_SEQ(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12) + LE_BITS_TO_4BYTE(__rxdesc + 8, 0, 12) #define GET_RX_DESC_FRAG(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4) + LE_BITS_TO_4BYTE(__rxdesc + 8, 12, 4) #define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8) + LE_BITS_TO_4BYTE(__rxdesc + 8, 16, 8) #define GET_RX_DESC_NEXT_IND(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1) + LE_BITS_TO_4BYTE(__rxdesc + 8, 30, 1) /* DWORD 3 */ #define GET_RX_DESC_RX_MCS(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6) + LE_BITS_TO_4BYTE(__rxdesc + 12, 0, 6) #define GET_RX_DESC_RX_HT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 6, 1) #define GET_RX_DESC_AMSDU(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 7, 1) #define GET_RX_DESC_SPLCP(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 8, 1) #define GET_RX_DESC_BW(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 9, 1) #define GET_RX_DESC_HTC(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 10, 1) #define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 11, 1) #define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 12, 1) #define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 13, 1) #define GET_RX_DESC_HWPC_ERR(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 14, 1) #define GET_RX_DESC_HWPC_IND(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1) + LE_BITS_TO_4BYTE(__rxdesc + 12, 15, 1) #define GET_RX_DESC_IV0(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16) + LE_BITS_TO_4BYTE(__rxdesc + 12, 16, 16) /* DWORD 4 */ #define GET_RX_DESC_IV1(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32) + LE_BITS_TO_4BYTE(__rxdesc + 16, 0, 32) /* DWORD 5 */ #define GET_RX_DESC_TSFL(__rxdesc) \ - SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32) + LE_BITS_TO_4BYTE(__rxdesc + 20, 0, 32) /*======================= tx desc ============================================*/ @@ -222,182 +200,182 @@ struct rx_drv_info_92c { /* Dword 0 */ #define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 0, 16, __value) #define 
SET_TX_DESC_OFFSET(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 16, 8, __value) #define SET_TX_DESC_BMC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 24, 1, __value) #define SET_TX_DESC_HTC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 25, 1, __value) #define SET_TX_DESC_LAST_SEG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 26, 1, __value) #define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 27, 1, __value) #define SET_TX_DESC_LINIP(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 28, 1, __value) #define SET_TX_DESC_NO_ACM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 29, 1, __value) #define SET_TX_DESC_GF(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 30, 1, __value) #define SET_TX_DESC_OWN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value) /* Dword 1 */ #define SET_TX_DESC_MACID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value) #define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 5, 1, __value) #define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 6, 1, __value) #define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 7, 1, __value) #define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 8, 5, __value) #define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 13, 1, __value) #define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 14, 1, __value) #define SET_TX_DESC_PIFS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 15, 1, __value) #define SET_TX_DESC_RATE_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value) #define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value) #define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 20, 1, __value) #define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 21, 1, __value) #define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 22, 2, __value) #define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 4, 26, 5, __value) /* Dword 2 */ #define SET_TX_DESC_RTS_RC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value) + 
SET_BITS_TO_LE_4BYTE(__txdesc + 8, 0, 6, __value) #define SET_TX_DESC_DATA_RC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 6, 6, __value) #define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 14, 2, __value) #define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 17, 1, __value) #define SET_TX_DESC_RAW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 18, 1, __value) #define SET_TX_DESC_CCX(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 19, 1, __value) #define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 20, 3, __value) #define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 24, 1, __value) #define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 25, 1, __value) #define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 26, 2, __value) #define SET_TX_DESC_TX_ANTL(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 28, 2, __value) #define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 8, 30, 2, __value) /* Dword 3 */ #define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 0, 8, __value) #define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 8, 8, __value) #define SET_TX_DESC_SEQ(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 16, 12, __value) #define SET_TX_DESC_PKT_ID(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 12, 28, 4, __value) /* Dword 4 */ #define SET_TX_DESC_RTS_RATE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 0, 5, __value) #define SET_TX_DESC_AP_DCFE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 5, 1, __value) #define SET_TX_DESC_QOS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 6, 1, __value) #define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 7, 1, __value) #define SET_TX_DESC_USE_RATE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 8, 1, __value) #define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 9, 1, __value) #define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 10, 1, __value) #define SET_TX_DESC_CTS2SELF(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value) + 
SET_BITS_TO_LE_4BYTE(__txdesc + 16, 11, 1, __value) #define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 12, 1, __value) #define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 13, 1, __value) #define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 18, 1, __value) #define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 19, 1, __value) #define SET_TX_DESC_DATA_SC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 20, 2, __value) #define SET_TX_DESC_DATA_STBC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 22, 2, __value) #define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 24, 1, __value) #define SET_TX_DESC_DATA_BW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 25, 1, __value) #define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 26, 1, __value) #define SET_TX_DESC_RTS_BW(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 27, 1, __value) #define SET_TX_DESC_RTS_SC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 28, 2, __value) #define SET_TX_DESC_RTS_STBC(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 16, 30, 2, __value) /* Dword 5 */ #define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 0, 6, __val) #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 6, 1, __val) #define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val) + SET_BITS_TO_LE_4BYTE(__pdesc + 20, 7, 1, __val) #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 8, 5, __value) #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 13, 4, __value) #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 17, 1, __value) #define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 18, 6, __value) #define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 20, 24, 8, __value) /* Dword 6 */ #define SET_TX_DESC_TXAGC_A(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 0, 5, __value) #define SET_TX_DESC_TXAGC_B(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 5, 5, __value) #define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \ - 
SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 10, 1, __value) #define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 11, 5, __value) #define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 16, 4, __value) #define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 20, 4, __value) #define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 24, 4, __value) #define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 24, 28, 4, __value) /* Dword 7 */ #define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 0, 16, __value) #define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 16, 4, __value) #define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 20, 4, __value) #define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value) #define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \ - SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value) + SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value) int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
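The trx.h rework above drops the file-local SHIFT_AND_MASK_LE/SET_BITS_OFFSET_LE macros in favour of the shared LE_BITS_TO_4BYTE/SET_BITS_TO_LE_4BYTE helpers, which, per the removed comments, keep the same semantics: extract or replace a (shift, length) bit field inside a little-endian 32-bit word, converting to host order only for the arithmetic. A C sketch of equivalent inline helpers, assuming those semantics (the _sketch names are placeholders, not symbols from the tree; len is 1..32 as in the accessors above):

#include <linux/types.h>
#include <asm/byteorder.h>

/* What LE_BITS_TO_4BYTE(p, start, len) is expected to compute:
 * pull "len" bits at offset "start" out of an __le32 at p. */
static inline u32 le_bits_to_4byte_sketch(const void *p, u32 start, u32 len)
{
	u32 word = le32_to_cpu(*(const __le32 *)p);

	return (word >> start) & (0xffffffffU >> (32 - len));
}

/* What SET_BITS_TO_LE_4BYTE(p, start, len, val) is expected to do:
 * read-modify-write the same bit range, storing back little-endian. */
static inline void set_bits_to_le_4byte_sketch(void *p, u32 start,
					       u32 len, u32 val)
{
	u32 mask = 0xffffffffU >> (32 - len);
	u32 word = le32_to_cpu(*(__le32 *)p);

	word = (word & ~(mask << start)) | ((val & mask) << start);
	*(__le32 *)p = cpu_to_le32(word);
}

Under that reading, GET_RX_DESC_PKT_LEN(rxdesc) is simply le_bits_to_4byte_sketch(rxdesc, 0, 14), and the _rtl_tx_desc_checksum() change in trx.c above is the 16-bit analogue: treating the descriptor as __le16 words and folding them through le16_to_cpu() yields the same checksum on big- and little-endian hosts.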
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 17f6903c14bb..88faeab2574f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -26,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -59,86 +60,31 @@ static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92d_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *) buffer; - u32 *pu4BytePtr = (u32 *) buffer; - u32 i, offset, blockCount, remainSize; - - blockCount = size / blocksize; - remainSize = size % blocksize; - for (i = 0; i < blockCount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - if (remainSize) { - offset = blockCount * blocksize; - bufferptr += offset; - for (i = 0; i < remainSize; i++) { - rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl92d_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl92d_fw_block_write(hw, buffer, size); -} - -static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - *pfwlen = fwlen; -} - static void _rtl92d_write_fw(struct ieee80211_hw *hw, enum version_8192d version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 *bufferPtr = buffer; - u32 pagenums, remainSize; + u8 *bufferptr = buffer; + u32 pagenums, remainsize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) - _rtl92d_fill_dummy(bufferPtr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192D_PAGE_SIZE; - remainSize = size % FW_8192D_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + remainsize = size % FW_8192D_PAGE_SIZE; + if (pagenums > 8) + pr_err("Page numbers should not be greater than 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; - _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), - FW_8192D_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192D_PAGE_SIZE); } - if (remainSize) { + if (remainsize) { offset = pagenums * FW_8192D_PAGE_SIZE; page = pagenums; - _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), - remainSize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } @@ -153,13 +99,10 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n", + value32); return -EIO; } - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK !
REG_MCUFWDL:0x%08x\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); @@ -182,7 +125,7 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) udelay(50); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } - RT_ASSERT((delay > 0), "8051 reset failed!\n"); + WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n"); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "=====> 8051 reset success (%d)\n", delay); } @@ -326,13 +269,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) value &= (~BIT(5)); rtl_write_byte(rtlpriv, 0x1f, value); spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "fw is not ready to run!\n"); - goto exit; - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n"); - } + if (err) + pr_err("fw is not ready to run!\n"); exit: err = _rtl92d_fw_init(hw); return err; @@ -407,8 +345,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, while (!bwrite_success) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } boxnum = rtlhal->last_hmeboxnum; @@ -430,8 +367,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); @@ -507,8 +444,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, boxcontent[idx]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } bwrite_success = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index fcb14c5db172..1bd1893bb401 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -163,8 +163,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HAL_DEF_WOWLAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -358,9 +357,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_VOQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -500,8 +498,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } } @@ -520,9 +517,8 @@ static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -920,7 +916,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw) /* 
rtlpriv->intf_ops->disable_aspm(hw); */ rtstatus = _rtl92de_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags); return err; @@ -1119,11 +1115,8 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); return 1; - break; - } rtl_write_byte(rtlpriv, MSR, bt_msr); rtlpriv->cfg->ops->led_control(hw, ledaction); @@ -1732,7 +1725,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) break; default: chipver |= CHIP_92D_D_CUT; - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n"); + pr_err("Unknown CUT!\n"); break; } rtlpriv->rtlhal.version = chipver; @@ -1816,7 +1809,7 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92de_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } return; } @@ -2169,8 +2162,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2186,9 +2179,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index c22b8a215c87..4be787e53279 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -66,8 +66,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -101,8 +101,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 424f54babd03..de98d88199d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -716,7 +716,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw) rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -731,13 +731,13 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + 
pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, @@ -833,8 +833,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -987,8 +986,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } switch (rtlphy->current_chan_bw) { @@ -1019,8 +1018,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -2700,7 +2699,7 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL\n"); + WARN_ONCE(true, "rtl8192de: cmdtable cannot be NULL\n"); return false; } if (cmdtableidx >= cmdtablesz) @@ -2842,9 +2841,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rtl92d_phy_reload_iqk_setting(hw, channel); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - currentcmd->cmdid); + pr_err("switch case %#x not processed\n", + currentcmd->cmdid); break; } break; @@ -2893,17 +2891,17 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) * 5G and 2.4G band. */ if (channel <= 14) return 0; - RT_ASSERT((channel > 14), "5G but channel<=14\n"); + WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n"); break; case BAND_ON_2_4G: /* Get first channel error when change between * 5G and 2.4G band. 
*/ if (channel > 14) return 0; - RT_ASSERT((channel <= 14), "2G but channel>14\n"); + WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n"); break; default: - RT_ASSERT(false, "Invalid WirelessMode(%#x)!!\n", + WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n", rtlpriv->mac80211.mode); break; } @@ -2956,9 +2954,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw) rtl92d_dm_write_dig(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlphy->current_io_type); + pr_err("switch case %#x not processed\n", + rtlphy->current_io_type); break; } rtlphy->set_io_inprogress = false; @@ -2988,8 +2985,8 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", iotype); + pr_err("switch case %#x not processed\n", + iotype); break; } } while (false); @@ -3176,8 +3173,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92d_phy_set_rfsleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } @@ -3336,7 +3333,7 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw) } } if (i == 200) - RT_ASSERT(false, "Another mac power off over time\n"); + WARN_ONCE(true, "rtl8192de: Another mac power off over time\n"); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 9dc9e915513e..021d3c538ac2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -63,8 +63,7 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 2d65e4095292..16132c66e5e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -140,8 +140,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -171,8 +169,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -185,8 +182,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -256,7 +252,8 @@ static struct rtl_mod_params rtl92de_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, }; static const struct rtl_hal_cfg rtl92de_hal_cfg = { @@ -366,15 +363,17 @@ MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI 
wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin"); module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92de_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92de_mod_params.debug_level, int, 0644); module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444); +module_param_named(debug_mask, rtl92de_mod_params.debug_mask, ullong, 0644); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); @@ -402,7 +401,7 @@ static int __init rtl92de_module_init(void) ret = pci_register_driver(&rtl92de_driver); if (ret) - RT_ASSERT(false, "No device found\n"); + WARN_ONCE(true, "rtl8192de: No device found\n"); return ret; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 5fb37564957c..5c9c8741134f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -794,7 +794,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", desc_name); break; } @@ -814,7 +814,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -834,7 +834,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", desc_name); break; } @@ -848,7 +848,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name) ret = GET_RX_DESC_PKT_LEN(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index b3f6a9ed15d4..9d7a16c9e74e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -48,64 +49,6 @@ static void _rtl92ee_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl92ee_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size 
/ blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, - (FW_8192C_START_ADDRESS + offset + i), - *(bufferptr + i)); - } - } -} - -static void _rtl92ee_fw_page_write(struct ieee80211_hw *hw, u32 page, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8)(page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - - _rtl92ee_fw_block_write(hw, buffer, size); -} - -static void _rtl92ee_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8)(fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl92ee_write_fw(struct ieee80211_hw *hw, enum version_8192e version, u8 *buffer, u32 size) @@ -117,28 +60,25 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size); - _rtl92ee_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + if (pagenums > 8) + pr_err("Page numbers should not be greater than 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); udelay(2); } if (remainsize) { offset = pagenums * FW_8192C_PAGE_SIZE; page = pagenums; - _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } @@ -155,15 +95,10 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n", + value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -174,21 +109,15 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , - "Polling FW ready success!! REG_MCUFWDL:0x%08x. count = %d\n", - value32, counter); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8192C_POLLING_DELAY*10); } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n", - value32, counter); + pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x.
count = %d\n", + value32, counter); exit: return err; @@ -240,13 +169,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) _rtl92ee_enable_fw_download(hw, false); err = _rtl92ee_fw_free_to_go(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , - "Firmware is ready to run!\n"); - } return 0; } @@ -462,8 +384,8 @@ void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8192ee: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index ebf663e1a81a..b44244a8a22f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1006,7 +1006,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a); /* Note Data sheet don't define */ - rtl_write_word(rtlpriv, 0x4C7, 0x80); + rtl_write_byte(rtlpriv, 0x4C7, 0x80); rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); @@ -1320,7 +1320,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, 0x65, 1); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; return err; } @@ -1485,8 +1485,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -1582,7 +1581,7 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192ee: invalid aci: %d !\n", aci); break; } } @@ -2206,7 +2205,7 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92ee_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl92ee_hal_customized_behavior(hw); @@ -2484,9 +2483,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 5ad7e753c357..8b072ee8e0d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -170,7 +170,7 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -214,7 +214,7 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -650,7 +650,7 @@ 
static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -662,12 +662,12 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) } _rtl92ee_phy_txpower_by_rate_configuration(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -1176,7 +1176,7 @@ static u8 _rtl92ee_phy_get_ratesection_intxpower_byrate(enum radio_path path, rate_section = 7; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n"); break; } return rate_section; @@ -1239,7 +1239,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n"); break; } @@ -1675,8 +1675,7 @@ void rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1717,8 +1716,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1742,8 +1741,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1811,8 +1810,8 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8192ee: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1860,8 +1859,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192ee: illegal channel for Zebra: %d\n", channel); _rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, @@ -1884,8 +1883,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n" , *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } @@ -1948,7 +1947,7 @@ static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8192ee: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c index 73716c07d433..bc76a91da762 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c @@ -55,8 +55,7 @@ void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 46b605de36e7..554f2dc86bc5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -133,8 +133,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -165,8 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw\n"); + pr_err("Can't alloc buffer for fw\n"); return 1; } @@ -179,8 +176,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -260,7 +256,8 @@ static struct rtl_mod_params rtl92ee_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 
0, }; static const struct rtl_hal_cfg rtl92ee_hal_cfg = { @@ -370,7 +367,8 @@ MODULE_DESCRIPTION("Realtek 8192EE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192eefw.bin"); module_param_named(swenc, rtl92ee_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92ee_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92ee_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92ee_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444); @@ -382,7 +380,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
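As in the 8192cu and 8192de hunks earlier, the single read-only "debug" parameter is split into two runtime-writable knobs. A sketch of the resulting pattern, with placeholder names (the real fields live in struct rtl_mod_params; "example_params" is hypothetical):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* placeholder structure; the drivers keep these in rtl_mod_params */
static struct {
	int debug_level;		/* per-message verbosity, 0-5 */
	unsigned long long debug_mask;	/* bitmask of debug categories */
} example_params = {
	.debug_level = 0,
	.debug_mask = 0,
};

/* 0644 instead of the old 0444: both knobs can now be changed at
 * runtime through /sys/module/<driver>/parameters/ */
module_param_named(debug_level, example_params.debug_level, int, 0644);
module_param_named(debug_mask, example_params.debug_mask, ullong, 0644);
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");

diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 2d48ccd02ac8..07440e9a8ca2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -991,8 +991,9 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR rxdesc :%d not processed\n", + desc_name); break; } } @@ -1011,8 +1012,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -1027,8 +1029,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8192ee: ERR rxdesc :%d not processed\n", + desc_name); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index 32f9207b5cf5..1922e78ad6bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -113,8 +113,7 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw) case RF_2T2R: return 0x22; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n", - rtlphy->rf_type); + pr_err("Unknown RF type(%x)\n", rtlphy->rf_type); break; } return 0x22; @@ -168,9 +167,7 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, _rtl92s_fw_set_rqpn(hw); if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Size over FIRMWARE_CODE_SIZE!\n"); - + pr_err("Size over FIRMWARE_CODE_SIZE!\n"); return false; } @@ -239,9 +236,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) { -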
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n", - cpustatus); + pr_err("FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n", + cpustatus); goto status_check_fail; } break; @@ -257,17 +253,15 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n", - cpustatus); + pr_err("FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n", + cpustatus); goto status_check_fail; } /* Turn On CPU */ rtstatus = _rtl92s_firmware_enable_cpu(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Enable CPU fail!\n"); + pr_err("Enable CPU fail!\n"); goto status_check_fail; } break; @@ -282,9 +276,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, } while (pollingcnt--); if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling DMEM code done fail ! cpustatus(%#x)\n", - cpustatus); + pr_err("Polling DMEM code done fail ! cpustatus(%#x)\n", + cpustatus); goto status_check_fail; } @@ -308,9 +301,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) || (pollingcnt <= 0)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling Load Firmware ready fail ! cpustatus(%x)\n", - cpustatus); + pr_err("Polling Load Firmware ready fail ! cpustatus(%x)\n", + cpustatus); goto status_check_fail; } @@ -331,8 +323,7 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, break; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Unknown status check!\n"); + pr_err("Unknown status check!\n"); rtstatus = false; break; } @@ -380,8 +371,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) /* 2. Retrieve IMEM image. */ if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size > sizeof(firmware->fw_imem))) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "memory for data image is less than IMEM required\n"); + pr_err("memory for data image is less than IMEM required\n"); goto fail; } else { puc_mappedfile += fwhdr_size; @@ -393,8 +383,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) /* 3. Retriecve EMEM image. 
*/ if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "memory for data image is less than EMEM required\n"); + pr_err("memory for data image is less than EMEM required\n"); goto fail; } else { puc_mappedfile += firmware->fw_imem_len; @@ -428,8 +417,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unexpected Download step!!\n"); + pr_err("Unexpected Download step!!\n"); goto fail; } @@ -438,14 +426,14 @@ int rtl92s_download_fw(struct ieee80211_hw *hw) ul_filelength); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n"); + pr_err("fail!\n"); goto fail; } /* <3> Check whether load FW process is ready */ rtstatus = _rtl92s_firmware_checkready(hw, fwstatus); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n"); + pr_err("rtl8192se: firmware fail!\n"); goto fail; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 26e06b2837c3..d5e86b6fad11 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -75,11 +75,9 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HAL_DEF_WOWLAN: break; - default: { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); - break; - } + default: + pr_err("switch case %#x not processed\n", variable); + break; } } @@ -294,9 +292,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~AcmHw_VoqEn); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - e_aci); + pr_err("switch case %#x not processed\n", + e_aci); break; } } @@ -431,8 +428,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", variable); + pr_err("switch case %#x not processed\n", variable); break; } @@ -745,9 +741,8 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw) } while (pollingcnt--); if (pollingcnt <= 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n", - tmpu1b); + pr_err("Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n", + tmpu1b); tmpu1b = rtl_read_byte(rtlpriv, CMDR); rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN)); udelay(2); @@ -1004,7 +999,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */ if (!rtl92s_phy_mac_config(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n"); + pr_err("MAC Config failed\n"); err = rtstatus; goto exit; } @@ -1024,7 +1019,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) /* 4. 
Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */ if (!rtl92s_phy_bb_config(hw)) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n"); + pr_err("BB Config failed\n"); err = rtstatus; goto exit; } @@ -1194,8 +1189,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", type); + pr_err("Network type %d not supported!\n", type); return 1; } @@ -1251,7 +1245,7 @@ void rtl92se_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8192se: invalid aci: %d !\n", aci); break; } } @@ -1685,8 +1679,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) break; case EEPROM_93C46: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!\n"); + pr_err("RTL819X Not boot from eeprom, check it !!\n"); return; default: @@ -2030,7 +2023,7 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl92se_read_adapter_info(hw); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); rtlefuse->autoload_failflag = true; } } @@ -2463,8 +2456,8 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", enc_algo); + pr_err("switch case %#x not processed\n", + enc_algo); enc_algo = CAM_TKIP; break; } @@ -2481,9 +2474,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, - COMP_SEC, DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index 870007801f6b..c740aeb0e83f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c @@ -63,8 +63,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -99,8 +99,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index fcb9216af82d..86cb853f7169 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -235,7 +235,6 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (!is_hal_stop(rtlhal)) { @@ -247,8 +246,7 @@ void 
rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw, rtl92s_phy_set_fw_cmd(hw, FW_CMD_RESUME_DM_BY_SCAN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown operation\n"); + pr_err("Unknown operation\n"); break; } } @@ -288,8 +286,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -313,8 +311,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x18); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -330,7 +328,7 @@ static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL\n"); + WARN_ONCE(true, "rtl8192se: cmdtable cannot be NULL\n"); return false; } @@ -374,8 +372,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "invalid channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8192se: invalid channel for Zebra: %d\n", channel); _rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -437,9 +435,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - currentcmd->cmdid); + pr_err("switch case %#x not processed\n", + currentcmd->cmdid); break; } @@ -644,8 +641,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw, _rtl92se_phy_set_rf_sleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } @@ -937,8 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } @@ -951,8 +947,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); + pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } @@ -1077,12 +1072,10 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw) (rtlphy->rf_type == RF_1T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "RF_Type(%x) does not match RF_Num(%x)!!\n", - rtlphy->rf_type, rf_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, - "path1 0x%x, path2 0x%x, pathmap 0x%x\n", - path1, path2, pathmap); + pr_err("RF_Type(%x) does not match RF_Num(%x)!!\n", + rtlphy->rf_type, rf_num); + pr_err("path1 0x%x, path2 0x%x, pathmap 0x%x\n", + path1, path2, pathmap); } return rtstatus; @@ -1221,7 +1214,7 @@ void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw) } while (--pollingcnt); if (pollingcnt == 0) - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Set FW Cmd fail!!\n"); + pr_err("Set FW 
Cmd fail!!\n"); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index bd2fa7735866..ea5b8ec45ec9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c @@ -523,8 +523,7 @@ void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 998cefbd7e89..2006b09ea74f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -96,8 +96,7 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context) return; } if (firmware->size > rtlpriv->max_fw_size) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is too big!\n"); + pr_err("Firmware is too big!\n"); rtlpriv->max_fw_size = 0; release_firmware(firmware); return; @@ -179,8 +178,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpci->first_init = true; - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -218,8 +215,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl92se_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } @@ -299,7 +295,8 @@ static struct rtl_mod_params rtl92se_mod_params = { .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, }; /* Because memory R/W bursting will cause system hang/crash @@ -418,7 +415,8 @@ MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin"); module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl92se_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl92se_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl92se_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444); @@ -426,7 +424,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 9a5a11399221..12cef01e593b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -583,7 +583,7 @@ 
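The rtl8192se/sw.c hunk above also shows the module-parameter change that accompanies the logging rework in every driver touched by this series: the single read-only "debug" level is split into debug_level (int, 0-5) and debug_mask (a u64 bitmask of components), both writable at runtime (0644 instead of 0444). A hedged sketch of the wiring, with ex_mod_params standing in for the real rtl_mod_params:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

/* simplified stand-in for struct rtl_mod_params */
struct ex_mod_params {
	int debug_level;	/* verbosity, 0-5 */
	u64 debug_mask;		/* per-component enable bits */
};

static struct ex_mod_params ex_params = {
	.debug_level = 0,
	.debug_mask = 0,
};

/*
 * 0644: both knobs can be flipped at runtime via sysfs, unlike
 * the old read-only "debug" parameter (0444).
 */
module_param_named(debug_level, ex_params.debug_level, int, 0644);
module_param_named(debug_mask, ex_params.debug_mask, ullong, 0644);
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");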
void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n", desc_name); break; } @@ -603,7 +603,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_STATUS_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -623,7 +623,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n", desc_name); break; } @@ -639,7 +639,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name) ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index e5505387260b..a954a87b0ed9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -99,8 +99,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -123,8 +122,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } @@ -229,8 +228,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } @@ -259,8 +258,8 @@ void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8723ae: error H2C cmd because of Fw download fail!!!\n"); return; } memset(tmp_cmdbuf, 0, 8); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index f8be0bd7e326..bb9de2f6e695 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -570,9 +570,8 @@ static bool _rtl8723e_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -961,7 +960,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw) rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl8712e_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1107,8 +1106,7 @@ static enum version_8723e 
_rtl8723e_read_chip_version(struct ieee80211_hw *hw) "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Chip Version ID: Unknown. Bug?\n"); + pr_err("Chip Version ID: Unknown. Bug?\n"); break; } @@ -1157,8 +1155,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; break; } @@ -1256,7 +1253,7 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8723ae: invalid aci: %d !\n", aci); break; } } @@ -1852,7 +1849,7 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw) } else { rtlefuse->autoload_failflag = true; _rtl8723e_read_adapter_info(hw, false); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl8723e_hal_customized_behavior(hw); } @@ -2245,9 +2242,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index 77c10047cb20..e1e6d24f1daa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c @@ -58,8 +58,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -100,8 +100,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index 17b58cb32d55..5cf29f5a4b54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -133,7 +133,7 @@ static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { - RT_ASSERT(false, "deprecated!\n"); + WARN_ONCE(true, "rtl8723ae: _rtl8723e_phy_fw_rf_serial_write deprecated!\n"); } static void _rtl8723e_phy_bb_config_1t(struct ieee80211_hw *hw) @@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } @@ -227,13 +227,13 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = 
_rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, @@ -749,8 +749,7 @@ void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -791,8 +790,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -816,8 +815,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -885,8 +884,8 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8723ae: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -954,8 +953,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8723ae: illegal channel for Zebra: %d\n", channel); rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -977,8 +976,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c index 422771778e03..89958b64b52d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c @@ -51,8 +51,7 @@ void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index c51a9e8234e9..7bf9f2557920 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -145,8 +145,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) (u32)(PHIMR_RXFOVW | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -172,8 +170,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw 
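Note the predicate flip in each RT_ASSERT conversion in these phy.c hunks: RT_ASSERT(cond, ...) complained when cond was false, while WARN_ONCE(cond, ...) warns (once) when cond is true, so the range check (channel >= 1 && channel <= 14) becomes (channel < 1 || channel > 14), and a driver-name prefix is added because the rtlpriv context is gone. A minimal sketch under those assumptions (ex_check_channel is hypothetical):

#include <linux/bug.h>
#include <linux/types.h>

static bool ex_check_channel(u8 channel)
{
	/*
	 * Old: RT_ASSERT((channel >= 1 && channel <= 14),
	 *               "illegal channel for Zebra: %d\n", channel);
	 * New: warn once on the negated condition. WARN_ONCE()
	 * evaluates to true when it fires, so the valid-channel
	 * result is its negation.
	 */
	return !WARN_ONCE(channel < 1 || channel > 14,
			  "example: illegal channel for Zebra: %d\n",
			  channel);
}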
*hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x6000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } @@ -186,8 +183,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } return 0; @@ -270,7 +266,8 @@ static struct rtl_mod_params rtl8723e_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, .msi_support = false, .disable_watchdog = false, }; @@ -384,7 +381,8 @@ MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin"); module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8723e_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8723e_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8723e_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444); @@ -396,7 +394,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index e93125ebed81..c9838f52a7ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -617,7 +617,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", desc_name); break; } @@ -636,7 +636,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", desc_name); break; } @@ -656,7 +656,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n", desc_name); break; } @@ -672,7 +672,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n", desc_name); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 
8c5c27ce8e05..fbf396143985 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -97,8 +97,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger for FW INT!\n"); + pr_err("Write H2C fail because no trigger for FW INT!\n"); break; } @@ -121,8 +120,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", boxnum); + pr_err("switch case %#x not processed\n", + boxnum); break; } @@ -194,8 +193,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", cmd_len); + pr_err("switch case %#x not processed\n", + cmd_len); break; } @@ -224,8 +223,8 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8723be: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index aba60c3145c5..ae2a38ed4ba5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -747,9 +747,8 @@ static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1383,7 +1382,7 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) } rtstatus = _rtl8723be_init_mac(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; goto exit; } @@ -1532,8 +1531,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -1631,7 +1629,7 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8723be: invalid aci: %d !\n", aci); break; } } @@ -2247,7 +2245,7 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl8723be_read_adapter_info(hw, false); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } _rtl8723be_hal_customized_behavior(hw); } @@ -2584,9 +2582,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, - DBG_EMERG, - "Can not find free hw security cam entry\n"); + pr_err("Can not find free hw security cam entry\n"); return; } } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c index 
497913eb3b37..8232e010d090 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c @@ -57,8 +57,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = true; @@ -99,8 +99,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", pled->ledpin); + pr_err("switch case %#x not processed\n", + pled->ledpin); break; } pled->ledon = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 3cc2232f25ca..ab0f39e46e1b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } _rtl8723be_phy_init_tx_power_by_rate(hw); @@ -478,13 +478,13 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) } phy_txpower_by_rate_config(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -939,7 +939,7 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n"); break; } @@ -1004,7 +1004,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n"); break; } tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num] @@ -1249,8 +1249,7 @@ void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -1291,8 +1290,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -1316,8 +1315,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); @@ -1387,8 +1386,8 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) return 0; if (rtlphy->set_bwmode_inprogress) return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14"); + WARN_ONCE((rtlphy->current_channel > 14), + "rtl8723be: WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; @@ -1438,8 +1437,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); + WARN_ONCE((channel < 1 || channel > 14), + "rtl8723be: illegal channel for Zebra: %d\n", channel); rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, @@ -1462,8 +1461,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd = &postcommoncmd[*step]; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Invalid 'stage' = %d, Check it!\n", *stage); + pr_err("Invalid 'stage' = %d, Check it!\n", + *stage); return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 78f4f18d87b5..48491454b878 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c @@ -51,8 +51,7 @@ void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 847644d1f5f5..e571b876f0af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -144,8 +144,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) HSIMR_RON_INT_EN | 0); - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -179,8 +177,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } @@ -190,8 +187,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request firmware!\n"); + pr_err("Failed to request firmware!\n"); return 1; } return 0; @@ -273,7 +269,8 @@ static struct rtl_mod_params rtl8723be_mod_params = { .fwctrl_lps = true, .msi_support = false, .disable_watchdog = false, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, .ant_sel = 0, }; @@ -388,7 +385,8 @@ MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin"); module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, 
rtl8723be_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8723be_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); @@ -401,7 +399,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 2175aecbb8f4..6f65003a895a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -666,8 +666,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -685,8 +685,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n", + desc_name); break; } } @@ -705,8 +705,8 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -721,7 +721,7 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not process\n", + WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n", desc_name); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 6e518625edbe..ac573d69f6d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -26,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../efuse.h" #include "fw_common.h" #include <linux/module.h> @@ -53,65 +54,6 @@ void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable) } EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download); -void rtl8723_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv,
(FW_8192C_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, - (FW_8192C_START_ADDRESS + offset + i), - *(bufferptr + i)); - } - } -} -EXPORT_SYMBOL_GPL(rtl8723_fw_block_write); - -void rtl8723_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - rtl8723_fw_block_write(hw, buffer, size); -} -EXPORT_SYMBOL_GPL(rtl8723_fw_page_write); - -void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8) (fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - *pfwlen = fwlen; -} -EXPORT_SYMBOL(rtl8723_fill_dummy); - void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, u8 *buffer, u32 size, u8 max_page) @@ -123,26 +65,25 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); - rtl8723_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); page_nums = size / FW_8192C_PAGE_SIZE; remain_size = size % FW_8192C_PAGE_SIZE; if (page_nums > max_page) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater than %d\n", max_page); + pr_err("Page numbers should not greater than %d\n", + max_page); } for (page = 0; page < page_nums; page++) { offset = page * FW_8192C_PAGE_SIZE; - rtl8723_fw_page_write(hw, page, (bufferptr + offset), - FW_8192C_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); } if (remain_size) { offset = page_nums * FW_8192C_PAGE_SIZE; page = page_nums; - rtl8723_fw_page_write(hw, page, (bufferptr + offset), - remain_size); + rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size); } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n"); } @@ -209,14 +150,10 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= max_count) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report fail ! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("chksum report fail ! REG_MCUFWDL:0x%08x .\n", + value32); goto exit; } - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY; value32 &= ~WINTINI_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); @@ -239,9 +176,8 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, } while (counter++ < max_count); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("Polling FW ready fail!! 
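The fw_common.c hunk above is more than a logging change: the per-driver firmware-download helpers (rtl8723_fill_dummy(), rtl8723_fw_block_write(), rtl8723_fw_page_write(), and the matching _rtl8821ae_* copies further down) are deleted in favour of shared rtl_fill_dummy()/rtl_fw_page_write() helpers reached through the new ../efuse.h include. The padding step is small enough to restate; the following mirrors the deleted rtl8723_fill_dummy() body:

#include <linux/types.h>

/*
 * Zero-pad a firmware image to a 4-byte boundary so the block
 * writer can push it to the MCU as whole dwords (mirrors the
 * removed rtl8723_fill_dummy(); the shared helper is
 * rtl_fill_dummy() in rtlwifi's common efuse code).
 */
static void ex_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
{
	u32 fwlen = *pfwlen;
	u8 remain = (u8)(fwlen % 4);

	remain = (remain == 0) ? 0 : (4 - remain);
	while (remain > 0) {
		pfwbuf[fwlen] = 0;
		fwlen++;
		remain--;
	}
	*pfwlen = fwlen;
}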
REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; @@ -293,13 +229,8 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, rtl8723_enable_fw_download(hw, false); err = rtl8723_fw_free_to_go(hw, is_8723be, max_count); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Firmware is ready to run!\n"); - } + if (err) + pr_err("Firmware is not ready to run!\n"); return 0; } EXPORT_SYMBOL_GPL(rtl8723_download_fw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index 8ea372d1626e..77c25a976233 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h @@ -28,7 +28,6 @@ #define REG_SYS_FUNC_EN 0x0002 #define REG_MCUFWDL 0x0080 -#define FW_8192C_START_ADDRESS 0x1000 #define FW_8192C_PAGE_SIZE 4096 #define FW_8723A_POLLING_TIMEOUT_COUNT 1000 #define FW_8723B_POLLING_TIMEOUT_COUNT 6000 @@ -84,10 +83,6 @@ enum rtl8723be_cmd { void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable); -void rtl8723_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size); -void rtl8723_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size); void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, u8 *buffer, u32 size, u8 max_page); @@ -95,6 +90,5 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count); int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count); bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); -void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c index 75cbd1509b52..43d24e1ee5e6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c @@ -99,7 +99,7 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw, offset &= 0xff; newoffset = offset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); @@ -147,7 +147,7 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw, struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -283,7 +283,7 @@ bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + WARN_ONCE(true, "rtl8723-common: cmdtable cannot be NULL.\n"); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index bdfd444955d2..32900c51f024 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -604,8 +604,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw) if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { rtl_dm_dig->min_undec_pwdb_for_dm = 0; - RT_TRACE(rtlpriv, 
COMP_BB_POWERSAVING, DBG_LOUD, - "Not connected to any\n"); + pr_debug("rtl8821ae: Not connected to any AP\n"); } if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index b665446351a4..328c64d465ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -27,6 +27,7 @@ #include "../pci.h" #include "../base.h" #include "../core.h" +#include "../efuse.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -51,63 +52,6 @@ static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blocksize = sizeof(u32); - u8 *bufferptr = (u8 *)buffer; - u32 *pu4byteptr = (u32 *)buffer; - u32 i, offset, blockcount, remainsize; - - blockcount = size / blocksize; - remainsize = size % blocksize; - - for (i = 0; i < blockcount; i++) { - offset = i * blocksize; - rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset), - *(pu4byteptr + i)); - } - - if (remainsize) { - offset = blockcount * blocksize; - bufferptr += offset; - for (i = 0; i < remainsize; i++) { - rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS + - offset + i), *(bufferptr + i)); - } - } -} - -static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8)(page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl8821ae_fw_block_write(hw, buffer, size); -} - -static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 fwlen = *pfwlen; - u8 remain = (u8)(fwlen % 4); - - remain = (remain == 0) ? 0 : (4 - remain); - - while (remain > 0) { - pfwbuf[fwlen] = 0; - fwlen++; - remain--; - } - - *pfwlen = fwlen; -} - static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, enum version_8821ae version, u8 *buffer, u32 size) @@ -119,27 +63,24 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); - _rtl8821ae_fill_dummy(bufferptr, &size); + rtl_fill_dummy(bufferptr, &size); pagenums = size / FW_8821AE_PAGE_SIZE; remainsize = size % FW_8821AE_PAGE_SIZE; - if (pagenums > 8) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); - } + if (pagenums > 8) + pr_err("Page numbers should not greater then 8\n"); for (page = 0; page < pagenums; page++) { offset = page * FW_8821AE_PAGE_SIZE; - _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), - FW_8821AE_PAGE_SIZE); + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8821AE_PAGE_SIZE); } if (remainsize) { offset = pagenums * FW_8821AE_PAGE_SIZE; page = pagenums; - _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } } @@ -161,10 +102,6 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) value32); goto exit; } - - RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG, - "Checksum report OK ! 
REG_MCUFWDL:0x%08x .\n", value32); - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; value32 &= ~WINTINI_RDY; @@ -175,20 +112,14 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) counter = 0; do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", - value32); - err = 0; - goto exit; - } + if (value32 & WINTINI_RDY) + return 0; udelay(FW_8821AE_POLLING_DELAY); } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT); - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", - value32); + pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", + value32); exit: return err; @@ -510,8 +441,8 @@ void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u32 tmp_cmdbuf[2]; if (!rtlhal->fw_ready) { - RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + WARN_ONCE(true, + "rtl8821ae: error H2C cmd because of Fw download fail!!!\n"); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 1281ebe0c30a..4f83eee1ff75 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -822,9 +822,8 @@ static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) break; if (count > POLLING_LLT_THRESHOLD) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at address %d!\n", - address); + pr_err("Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -1128,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } @@ -1927,7 +1926,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw) rtstatus = _rtl8821ae_init_mac(hw); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + pr_err("Init MAC failed\n"); err = 1; return err; } @@ -2174,8 +2173,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, "Set Network type to AP!\n"); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not support!\n", type); + pr_err("Network type %d not support!\n", type); return 1; } @@ -2249,7 +2247,7 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); + WARN_ONCE(true, "rtl8821ae: invalid aci: %d !\n", aci); break; } } @@ -2601,11 +2599,10 @@ static u8 _rtl8821ae_get_chnl_group(u8 chnl) group = 12; else if (173 <= chnl && chnl <= 177) group = 13; - else - /*RT_TRACE(rtlpriv, COMP_EFUSE,DBG_LOUD, - "5G, Channel %d in Group not found\n",chnl);*/ - RT_ASSERT(!COMP_EFUSE, - "5G, Channel %d in Group not found\n", chnl); + else + WARN_ONCE(true, + "rtl8821ae: 5G, Channel %d in Group not found\n", + chnl); } return group; } @@ -3276,7 +3273,7 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw) rtlefuse->autoload_failflag = false; _rtl8821ae_read_adapter_info(hw, false); } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + pr_err("Autoload ERR!!\n"); } /*hal_ReadRFType_8812A()*/ /* _rtl8821ae_hal_customized_behavior(hw); */ @@ -3951,8 +3948,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, if 
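One functional fix hides among the rtl8821ae logging hunks above: _rtl8821ae_dbi_read() returns a u8 and selects a byte lane with addr % 4, yet fetched the result with rtl_read_word(), a 16-bit access at a possibly unaligned offset; the patch narrows it to rtl_read_byte(). A self-contained model of the corrected shape (ex_dbi_regs and ex_dbi_read are hypothetical, not driver code):

#include <linux/types.h>

/* Four byte lanes modelling REG_DBI_RDATA..REG_DBI_RDATA+3. */
static u8 ex_dbi_regs[4];

static u8 ex_dbi_read(u16 addr)
{
	/*
	 * The caller wants one byte and addr % 4 picks the lane,
	 * so the access must be byte-wide; the old 16-bit
	 * rtl_read_word() pulled in an unrelated neighbouring byte.
	 */
	return ex_dbi_regs[addr % 4];
}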
(mac->opmode == NL80211_IFTYPE_AP) { entry_id = rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "Can not find free hwsecurity cam entry\n"); + pr_err("Can not find free hwsecurity cam entry\n"); return; } } else { @@ -4135,8 +4131,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, count++; } while (tmp && count < 100); - RT_ASSERT((count < 100), - "Write wake up frame mask FAIL %d value!\n", tmp); + WARN_ONCE((count >= 100), + "rtl8821ae: Write wake up frame mask FAIL %d value!\n", + tmp); } /* Disable Rx packet buffer access. */ rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 5dad402171c2..c60f07aa4acf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -215,7 +215,6 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); bool is_pi_mode = false; u32 retvalue = 0; @@ -223,7 +222,7 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, /* 2009/06/17 MH We can not execute IO for power save or other accident mode.*/ if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + pr_err("return all one\n"); return 0xFFFFFFFF; } /* <20120809, Kordan> CCA OFF(when entering), @@ -284,7 +283,7 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, u32 newoffset; if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + pr_err("stop\n"); return; } offset &= 0xff; @@ -1665,7 +1664,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); return false; } _rtl8821ae_phy_init_tx_power_by_rate(hw); @@ -1674,7 +1673,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + pr_err("BB_PG Reg Fail!!\n"); return false; } @@ -1688,7 +1687,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_AGC_TAB); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + pr_err("AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, @@ -1870,8 +1869,8 @@ static u8 _rtl8821ae_get_rate_section_index(u32 regaddr) else if (regaddr >= 0xE20 && regaddr <= 0xE4C) index = (u8)((regaddr - 0xE20) / 4); else - RT_ASSERT(!COMP_INIT, - "Invalid RegAddr 0x%x\n", regaddr); + WARN_ONCE(true, + "rtl8821ae: Invalid RegAddr 0x%x\n", regaddr); return index; } @@ -2064,8 +2063,7 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, break; case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -2132,8 +2130,7 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, case RF90_PATH_B: case RF90_PATH_C: case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR,
DBG_EMERG, - "switch case %#x not processed\n", rfpath); + pr_err("switch case %#x not processed\n", rfpath); break; } return true; @@ -2322,7 +2319,7 @@ static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) rate_section = 11; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n"); break; } @@ -2588,7 +2585,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, shift = 24; break; default: - RT_ASSERT(true, "Rate_Section is Illegal\n"); + WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n"); break; } @@ -3336,8 +3333,7 @@ void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) (u8 *)&iotype); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Unknown Scan Backup operation.\n"); + pr_err("Unknown Scan Backup operation.\n"); break; } } @@ -3378,8 +3374,7 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv) else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER) sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) @@ -3394,16 +3389,14 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv) (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; else - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "SCMapping: Not Correct Primary40MHz Setting\n"); + pr_err("SCMapping: Not Correct Primary40MHz Setting\n"); } return (sc_set_40 << 4) | sc_set_20; } @@ -3479,8 +3472,8 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); break; } @@ -4660,8 +4653,8 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", iotype); + pr_err("switch case %#x not processed\n", + iotype); break; } } while (false); @@ -4704,9 +4697,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw) case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", - rtlphy->current_io_type); + pr_err("switch case %#x not processed\n", + rtlphy->current_io_type); break; } rtlphy->set_io_inprogress = false; @@ -4811,8 +4803,8 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case %#x not processed\n", rfpwr_state); + pr_err("switch case %#x not processed\n", + rfpwr_state); bresult = false; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c index c6ab957023e6..95489f41f8a0 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c @@ -34,8 +34,6 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw); void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3); @@ -50,8 +48,7 @@ void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "unknown bandwidth: %#X\n", bandwidth); + pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 297938e0effd..cd2a53b7e053 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -160,8 +160,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET | WAKE_ON_PATTERN_MATCH; - /* for debug level */ - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; @@ -192,14 +190,12 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for fw.\n"); + pr_err("Can't alloc buffer for fw.\n"); return 1; } rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't alloc buffer for wowlan fw.\n"); + pr_err("Can't alloc buffer for wowlan fw.\n"); return 1; } @@ -218,8 +214,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request normal firmware!\n"); + pr_err("Failed to request normal firmware!\n"); return 1; } /*load wowlan firmware*/ @@ -229,8 +224,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_wowlan_fw_cb); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to request wowlan firmware!\n"); + pr_err("Failed to request wowlan firmware!\n"); return 1; } return 0; @@ -313,7 +307,8 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .fwctrl_lps = true, .msi_support = true, .int_clear = true, - .debug = DBG_EMERG, + .debug_level = 0, + .debug_mask = 0, .disable_watchdog = 0, }; @@ -434,7 +429,8 @@ MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444); +module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644); +module_param_named(debug_mask, rtl8821ae_mod_params.debug_mask, ullong, 0644); module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); @@ -447,7 +443,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW 
control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 27727186ba5f..108098152cf3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -904,8 +904,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -923,8 +924,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR rxdesc :%d not processed\n", + desc_name); break; } } @@ -943,8 +945,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, - "ERR txdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR txdesc :%d not processed\n", + desc_name); break; } } else { @@ -959,8 +962,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_RX_DESC_BUFF_ADDR(pdesc); break; default: - RT_ASSERT(false, - "ERR rxdesc :%d not process\n", desc_name); + WARN_ONCE(true, + "rtl8821ae: ERR rxdesc :%d not processed\n", + desc_name); break; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 49015b05f3d1..4d989b8ab185 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -421,14 +421,12 @@ static void _rtl_rx_completed(struct urb *urb); static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb, struct urb *urb, gfp_t gfp_mask) { - struct rtl_priv *rtlpriv = rtl_priv(hw); void *buf; buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, &urb->transfer_dma); if (!buf) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to usb_alloc_coherent!!\n"); + pr_err("Failed to usb_alloc_coherent!!\n"); return -ENOMEM; } @@ -613,8 +611,6 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr, static void _rtl_rx_completed(struct urb *_urb) { struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context; - struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); - struct rtl_priv *rtlpriv = rtl_priv(hw); int err = 0; if (unlikely(IS_USB_STOP(rtlusb))) @@ -628,17 +624,15 @@ static void _rtl_rx_completed(struct urb *_urb) struct ieee80211_hdr *hdr; if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Too short packet from bulk IN! (len: %d)\n", - size); + pr_err("Too short packet from bulk IN! 
(len: %d)\n", + size); goto resubmit; } qlen = skb_queue_len(&rtlusb->rx_queue); if (qlen >= __RX_SKB_MAX_QUEUED) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Pending RX skbuff queue full! (qlen: %d)\n", - qlen); + pr_err("Pending RX skbuff queue full! (qlen: %d)\n", + qlen); goto resubmit; } @@ -647,8 +641,7 @@ static void _rtl_rx_completed(struct urb *_urb) skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding); if (!skb) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Can't allocate skb for bulk IN!\n"); + pr_err("Can't allocate skb for bulk IN!\n"); goto resubmit; } @@ -725,7 +718,6 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) struct urb *urb; int err; int i; - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); WARN_ON(0 == rtlusb->rx_urb_num); @@ -740,8 +732,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (err < 0) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to prep_rx_urb!!\n"); + pr_err("Failed to prep_rx_urb!!\n"); usb_free_urb(urb); goto err_out; } @@ -827,19 +818,36 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct urb *urb; /* should after adapter start and interrupt enable. */ set_hal_stop(rtlhal); cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Enable software */ SET_USB_STOP(rtlusb); + + /* free pre-allocated URBs from rtl_usb_start() */ + usb_kill_anchored_urbs(&rtlusb->rx_submitted); + + tasklet_kill(&rtlusb->rx_work_tasklet); + cancel_work_sync(&rtlpriv->works.lps_change_work); + + flush_workqueue(rtlpriv->works.rtl_wq); + + skb_queue_purge(&rtlusb->rx_queue); + + while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + } + rtlpriv->cfg->ops->hw_disable(hw); } static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) { int err; - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); usb_anchor_urb(_urb, &rtlusb->tx_submitted); @@ -847,8 +855,7 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) if (err < 0) { struct sk_buff *skb; - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Failed to submit urb\n"); + pr_err("Failed to submit urb\n"); usb_unanchor_urb(_urb); skb = (struct sk_buff *)_urb->context; kfree_skb(skb); @@ -859,7 +866,6 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; @@ -870,8 +876,7 @@ static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, txinfo->flags |= IEEE80211_TX_STAT_ACK; if (urb->status) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "Urb has error status 0x%X\n", urb->status); + pr_err("Urb has error status 0x%X\n", urb->status); goto out; } /* TODO: statistics */ @@ -919,7 +924,6 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, enum rtl_txq qnum) { - struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; @@ -927,8 +931,7 @@ static void 
_rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { - RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, - "USB device is stopping...\n"); + pr_err("USB device is stopping...\n"); kfree_skb(skb); return; } @@ -936,8 +939,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, _skb = skb; _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); if (unlikely(!_urb)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate urb. Drop skb!\n"); + pr_err("Can't allocate urb. Drop skb!\n"); kfree_skb(skb); return; } @@ -1059,7 +1061,7 @@ int rtl_usb_probe(struct usb_interface *intf, hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { - RT_ASSERT(false, "ieee80211 alloc failed\n"); + WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = hw->priv; @@ -1090,7 +1092,6 @@ int rtl_usb_probe(struct usb_interface *intf, rtlpriv->rtlhal.interface = INTF_USB; rtlpriv->cfg = rtl_hal_cfg; rtlpriv->intf_ops = &rtl_usb_ops; - rtl_dbgp_flag_init(hw); /* Init IO handler */ _rtl_usb_io_handler_init(&udev->dev, hw); rtlpriv->cfg->ops->read_chip_version(hw); @@ -1103,20 +1104,18 @@ int rtl_usb_probe(struct usb_interface *intf, /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't allocate sw for mac80211\n"); + pr_err("Can't allocate sw for mac80211\n"); goto error_out; } if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + pr_err("Can't init_sw_vars\n"); goto error_out; } rtlpriv->cfg->ops->init_sw_leds(hw); err = ieee80211_register_hw(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw.\n"); + pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; goto error_out; } diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index dafe486f8448..310fa90200b2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2221,11 +2221,13 @@ struct rtl_intf_ops { }; struct rtl_mod_params { + /* default: 0,0 */ + u64 debug_mask; /* default: 0 = using hardware encryption */ bool sw_crypto; /* default: 0 = DBG_EMERG (0)*/ - int debug; + int debug_level; /* default: 1 = using no linked power save */ bool inactiveps; @@ -2345,16 +2347,6 @@ struct rtl_works { struct work_struct fill_h2c_cmd; }; -struct rtl_debug { - u32 dbgp_type[DBGP_TYPE_MAX]; - int global_debuglevel; - u64 global_debugcomponents; - - /* add for proc debug */ - struct proc_dir_entry *proc_dir; - char proc_name[20]; -}; - #define MIMO_PS_STATIC 0 #define MIMO_PS_DYNAMIC 1 #define MIMO_PS_NOLIMIT 3 @@ -2583,7 +2575,6 @@ struct rtl_priv { /* sta entry list for ap adhoc or mesh */ struct list_head entry_list; - struct rtl_debug dbg; int max_fw_size; /* @@ -2713,23 +2704,14 @@ enum bt_radio_shared { (le32_to_cpu(_val)) /* Read data from memory */ -#define READEF1BYTE(_ptr) \ +#define READEF1BYTE(_ptr) \ EF1BYTE(*((u8 *)(_ptr))) /* Read le16 data from memory and convert to host ordering */ -#define READEF2BYTE(_ptr) \ +#define READEF2BYTE(_ptr) \ EF2BYTE(*(_ptr)) -#define READEF4BYTE(_ptr) \ +#define READEF4BYTE(_ptr) \ EF4BYTE(*(_ptr)) -/* Write data to memory */ -#define WRITEEF1BYTE(_ptr, _val) \ - (*((u8 *)(_ptr))) = EF1BYTE(_val) -/* Write le16 data to memory in host ordering */ -#define WRITEEF2BYTE(_ptr, _val) \ - (*((u16 *)(_ptr))) = 
EF2BYTE(_val) -#define WRITEEF4BYTE(_ptr, _val) \ - (*((u32 *)(_ptr))) = EF2BYTE(_val) - /* Create a bit mask * Examples: * BIT_LEN_MASK_32(0) => 0x00000000 @@ -2810,14 +2792,14 @@ value to host byte ordering.*/ * Set subfield of little-endian 4-byte value to specified value. */ #define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u32 *)(__pstart)) = \ - ( \ + *((__le32 *)(__pstart)) = \ + cpu_to_le32( \ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \ ); #define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u16 *)(__pstart)) = \ - ( \ + *((__le16 *)(__pstart)) = \ + cpu_to_le16( \ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \ ); diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 7f672f6879d0..58e148d7bc7b 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -281,7 +281,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, } if (value < 1 || value > 65535) { - wl1271_warning("dyanmic_ps_timeout is not in valid range"); + wl1271_warning("dynamic_ps_timeout is not in valid range"); return -ERANGE; } diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 47fe7f96a242..287023ef4a78 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -81,13 +81,6 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr, sdio_claim_host(func); - if (unlikely(dump)) { - printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr); - print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ", - DUMP_PREFIX_OFFSET, 16, 1, - buf, len, false); - } - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", @@ -107,6 +100,13 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr, if (WARN_ON(ret)) dev_err(child->parent, "sdio read failed (%d)\n", ret); + if (unlikely(dump)) { + printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr); + print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ", + DUMP_PREFIX_OFFSET, 16, 1, + buf, len, false); + } + return ret; } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 579521327b03..1073b27e54aa 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget) work_done = xenvif_tx_action(queue, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); xenvif_napi_schedule_or_enable_events(queue); } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8315fe73ecd0..cf82b5b42056 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1051,7 +1051,7 @@ err: if (work_done < budget) { int more_to_do = 0; - napi_complete(napi); + napi_complete_done(napi, work_done); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (more_to_do) @@ -1073,8 +1073,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu) return 0; } -static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) +static void xennet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { 
struct netfront_info *np = netdev_priv(dev); int cpu; @@ -1105,8 +1105,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; - - return tot; } static void xennet_release_tx_bufs(struct netfront_queue *queue) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1ccce1cd6aca..63d8e18fb6b1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static int pci_bus_num_vf(struct device *dev) +{ + return pci_num_vf(to_pci_dev(dev)); +} + struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, @@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = { .bus_groups = pci_bus_groups, .drv_groups = pci_drv_groups, .pm = PCI_PM_OPS_PTR, + .num_vf = pci_bus_num_vf, }; EXPORT_SYMBOL(pci_bus_type); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6d4b68c483f3..e7addea8741b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_HIGH_WATERMARK_PACK 5 #define QETH_WATERMARK_PACK_FUZZ 1 -#define QETH_IP_HEADER_SIZE 40 - /* large receive scatter gather copy break */ #define QETH_RX_SG_CB (PAGE_SIZE >> 1) #define QETH_RX_PULL_LEN 256 @@ -674,8 +672,6 @@ struct qeth_card_info { int broadcast_capable; int unique_id; struct qeth_card_blkt blkt; - __u32 csum_mask; - __u32 tx_csum_mask; enum qeth_ipa_promisc_modes promisc_mode; __u32 diagass_support; __u32 hwtrap; @@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); -int qeth_send_startlan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e33558313834..315d8a2db7c0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -int qeth_send_startlan(struct qeth_card *card) +static int qeth_send_startlan(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_startlan); static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -5087,6 +5086,20 @@ retriable: goto out; } + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5098,14 +5111,14 @@ retriable: if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out; } } if 
(qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out; } } @@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card, if (cmd->hdr.prot_version == QETH_PROT_IPV6) card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } - if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); - } - if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && - cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - card->info.tx_csum_mask = - cmd->data.setassparms.data.flags_32bit; - QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); - } - return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); +/* Callback to handle checksum offload command reply from OSA card. + * Verify that required features have been enabled on the card. + * Return error in hdr->return_code as this value is checked by caller. + * + * Always returns zero to indicate no further messages from the OSA card. + */ +static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_checksum_cmd *chksum_cb = + (struct qeth_checksum_cmd *)reply->param; + + QETH_CARD_TEXT(card, 4, "chkdoccb"); + if (cmd->hdr.return_code) + return 0; + + memset(chksum_cb, 0, sizeof(*chksum_cb)); + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); + } + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + chksum_cb->enabled = + cmd->data.setassparms.data.chksum.enabled; + QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); + QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); + } + return 0; +} + +/* Send command to OSA card and check results. 
*/ +static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, long data, + struct qeth_checksum_cmd *chksum_cb) +{ + struct qeth_cmd_buffer *iob; + int rc = -ENOMEM; + + QETH_CARD_TEXT(card, 4, "chkdocmd"); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + sizeof(__u32), QETH_PROT_IPV4); + if (iob) + rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, + qeth_ipa_checksum_run_cmd_cb, + chksum_cb); + return rc; +} + static int qeth_send_checksum_on(struct qeth_card *card, int cstype) { - long rxtx_arg; + const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR | + QETH_IPA_CHECKSUM_UDP | + QETH_IPA_CHECKSUM_TCP; + struct qeth_checksum_cmd chksum_cb; int rc; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, + &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.supported) != + required_features) + rc = -EIO; + else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && + cstype == IPA_INBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); + } if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Starting HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); return rc; } - rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask - : card->info.csum_mask; - rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE, - rxtx_arg); + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, + chksum_cb.supported, &chksum_cb); + if (!rc) { + if ((required_features & chksum_cb.enabled) != + required_features) + rc = -EIO; + } if (rc) { + qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s failed, using SW checksumming\n", QETH_CARD_IFNAME(card)); @@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) { - int rc; - - if (on) { - rc = qeth_send_checksum_on(card, cstype); - if (rc) - return -EIO; - } else { - rc = qeth_send_simple_setassparms(card, cstype, - IPA_CMD_ASS_STOP, 0); - if (rc) - return -EIO; - } - return 0; + int rc = (on) ? qeth_send_checksum_on(card, cstype) + : qeth_send_simple_setassparms(card, cstype, + IPA_CMD_ASS_STOP, 0); + return rc ? -EIO : 0; } static int qeth_set_ipa_tso(struct qeth_card *card, int on) diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 6cccc9a49ede..bc69d0a338ad 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -352,11 +352,28 @@ struct qeth_arp_query_info { char *udata; }; +/* IPA set assist segmentation bit definitions for receive and + * transmit checksum offloading. + */ +enum qeth_ipa_checksum_bits { + QETH_IPA_CHECKSUM_IP_HDR = 0x0002, + QETH_IPA_CHECKSUM_UDP = 0x0008, + QETH_IPA_CHECKSUM_TCP = 0x0010, + QETH_IPA_CHECKSUM_LP2LP = 0x0020 +}; + +/* IPA Assist checksum offload reply layout. 
*/ +struct qeth_checksum_cmd { + __u32 supported; + __u32 enabled; +} __packed; + /* SETASSPARMS IPA Command: */ struct qeth_ipacmd_setassparms { struct qeth_ipacmd_setassparms_hdr hdr; union { __u32 flags_32bit; + struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry add_arp_entry; struct qeth_arp_query_data query_arp; __u8 ip[16]; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9c921c2833f1..bea483307618 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,9 +27,6 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); -static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); -static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, - enum qeth_ipa_cmds); static void qeth_l2_set_rx_mode(struct net_device *); static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); @@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode) return rc; } +static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "L2sdmac"); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; + memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); + return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); +} + +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); + if (rc == 0) { + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + card->dev->dev_addr, card->dev->name); + } else { + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + switch (rc) { + case -EEXIST: + dev_warn(&card->gdev->dev, + "MAC address %pM already exists\n", mac); + break; + case -EPERM: + dev_warn(&card->gdev->dev, + "MAC address %pM is not authorized\n", mac); + break; + } + } + return rc; +} + +static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Delmac"); + if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); + if (rc == 0) + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + return rc; +} + static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { int rc; QETH_CARD_TEXT(card, 2, "L2Sgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC); if (rc == -EEXIST) QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", mac, QETH_CARD_IFNAME(card)); @@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) int rc; QETH_CARD_TEXT(card, 2, "L2Dgmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELGMAC)); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC); if (rc) QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %d\n", @@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) return rc; } -static inline u32 
qeth_l2_mac_hash(const u8 *addr) +static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) { - return get_unaligned((u32 *)(&addr[2])); + if (mac->is_uc) { + return qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_SETVMAC); + } else { + return qeth_l2_send_setgroupmac(card, mac->mac_addr); + } } -static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac) +static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac) { - - int rc; - if (mac->is_uc) { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_SETVMAC)); + return qeth_l2_send_setdelmac(card, mac->mac_addr, + IPA_CMD_DELVMAC); } else { - rc = qeth_setdel_makerc(card, - qeth_l2_send_setgroupmac(card, mac->mac_addr)); + return qeth_l2_send_delgroupmac(card, mac->mac_addr); } - return rc; } -static void qeth_l2_del_all_macs(struct qeth_card *card, int del) +static void qeth_l2_del_all_macs(struct qeth_card *card) { struct qeth_mac *mac; struct hlist_node *tmp; @@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del) spin_lock_bh(&card->mclock); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { - if (del) { - if (mac->is_uc) - qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - else - qeth_l2_send_delgroupmac(card, mac->mac_addr); - } hash_del(&mac->hnode); kfree(mac); } spin_unlock_bh(&card->mclock); } +static inline u32 qeth_l2_mac_hash(const u8 *addr) +{ + return get_unaligned((u32 *)(&addr[2])); +} + static inline int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { @@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_del_all_macs(card, 0); + qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } @@ -577,65 +627,6 @@ out: return work_done; } -static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, - enum qeth_ipa_cmds ipacmd) -{ - struct qeth_ipa_cmd *cmd; - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 2, "L2sdmac"); - iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); - if (!iob) - return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; - memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_send_ipa_cmd(card, iob, NULL, NULL); -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Setmac"); - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_SETVMAC)); - if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); - dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); - } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - switch (rc) { - case -EEXIST: - dev_warn(&card->gdev->dev, - "MAC address %pM already exists\n", mac); - break; - case -EPERM: - dev_warn(&card->gdev->dev, - "MAC address %pM is not authorized\n", mac); - break; - } - } - return rc; -} - -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, - IPA_CMD_DELVMAC)); - if (rc == 0) - 
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; @@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { if (mac->disp_flag == QETH_DISP_ADDR_DELETE) { - if (!mac->is_uc) - rc = qeth_l2_send_delgroupmac(card, - mac->mac_addr); - else { - rc = qeth_l2_send_setdelmac(card, mac->mac_addr, - IPA_CMD_DELVMAC); - } - + qeth_l2_remove_mac(card, mac); hash_del(&mac->hnode); kfree(mac); @@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { rc = qeth_l2_start_ipassists(card); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ac37d050e765..06d0addcc058 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 0e00a5ce0f00..05e9471e3d3f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; - if (card->state == CARD_STATE_DOWN) - return -EPERM; - memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); EBCASC(tmp_hsuid, 8); return sprintf(buf, "%s\n", tmp_hsuid); @@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_VIPA) @@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. 
Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, @@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; - struct hlist_node *tmp; char addr_str[40]; + int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i = 0; + int i; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); - hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) { + hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_RXIP) @@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - i) <= entry_len) + if ((PAGE_SIZE - str_len) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); - i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); } spin_unlock_bh(&card->ip_lock); - i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - return i; + return str_len; } static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index 1fbd495e5e63..c7652c35be19 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { priv->nstats.rx_dropped++; @@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv) skb->protocol = eth_type_trans(skb, skb->dev); priv->nstats.rx_packets++; priv->nstats.rx_bytes += rx_ind_size; - skb->dev->last_rx = jiffies; netif_rx(skb); } else { priv->nstats.rx_dropped++; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index fb0928a4fb97..781ef623233e 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, skb_reserve(skb, BYTE_OFFSET); skb_put(skb, length); skb->protocol = eth_type_trans(skb, skb->dev); - skb->dev->last_rx = jiffies; netif_rx(skb); /* Fill rx ring */ skb_data = xlr_alloc_skb(); @@ -397,14 +396,6 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) TX_DROP_FRAME_COUNTER); } -static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats - ) -{ - 
xlr_stats(ndev, stats); - return stats; -} - static const struct net_device_ops xlr_netdev_ops = { .ndo_open = xlr_net_open, .ndo_stop = xlr_net_stop, @@ -412,7 +403,7 @@ static const struct net_device_ops xlr_netdev_ops = { .ndo_select_queue = xlr_net_select_queue, .ndo_set_mac_address = xlr_net_set_mac_addr, .ndo_set_rx_mode = xlr_set_rx_mode, - .ndo_get_stats64 = xlr_get_stats64, + .ndo_get_stats64 = xlr_stats, }; /* diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index f0900d1c4d7b..fc849d4a1b5d 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) if (rx_count < budget) { /* No more work */ - napi_complete(napi); + napi_complete_done(napi, rx_count); enable_irq(rx_group->irq); } return rx_count; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 6b4c20872323..0b8053205091 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -23,6 +23,7 @@ #endif /* CONFIG_XFRM */ #include <linux/atomic.h> +#include <net/sch_generic.h> #include <asm/octeon/octeon.h> @@ -369,9 +370,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_NET_SCHED skb->tc_index = 0; -#ifdef CONFIG_NET_CLS_ACT - skb->tc_verd = 0; -#endif /* CONFIG_NET_CLS_ACT */ + skb_reset_tc(skb); #endif /* CONFIG_NET_SCHED */ #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index e5ba7d1a809f..43a77745e6fb 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb, ieee->LinkDetectInfo.NumRecvDataInPeriod++; ieee->LinkDetectInfo.NumRxOkInPeriod++; } - dev->last_rx = jiffies; /* Data frame - extract src/dst addresses */ rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 82f654305414..b1f2fdfcb718 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, stats = hostap_get_stats(dev); from_assoc_ap = 1; } -#endif - - dev->last_rx = jiffies; -#ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index c1f674f5268c..ca3743d273e0 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget) /* If there aren't any more packets to receive stop the poll */ if (rx_count < budget) - napi_complete(napi); + napi_complete_done(napi, rx_count); return rx_count; } diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 4fe037aeef12..6134eba5cad4 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb) &usbin->rxfrm.desc.frame_control, hdrlen); skb->dev = wlandev->netdev; - skb->dev->last_rx = jiffies; /* And set the frame 
length properly */ skb_trim(skb, data_len + hdrlen); diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 73fcf07254fe..53dbbd69e552 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev, } if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) { - skb->dev->last_rx = jiffies; wlandev->netdev->stats.rx_packets++; wlandev->netdev->stats.rx_bytes += skb->len; netif_rx_ni(skb); @@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg) skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_80211_RAW); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 5dc34653274a..c42e9c305134 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, return r; } +static bool vhost_exceeds_maxpend(struct vhost_net *net) +{ + struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; + struct vhost_virtqueue *vq = &nvq->vq; + + return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV + == nvq->done_idx; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) @@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net) /* If more outstanding DMAs, queue the work. * Handle upend_idx wrap around */ - if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) - % UIO_MAXIOV == nvq->done_idx)) + if (unlikely(vhost_exceeds_maxpend(net))) break; head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, @@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net) msg.msg_control = NULL; ubufs = NULL; } + + total_len += len; + if (total_len < VHOST_NET_WEIGHT && + !vhost_vq_avail_empty(&net->dev, vq) && + likely(!vhost_exceeds_maxpend(net))) { + msg.msg_flags |= MSG_MORE; + } else { + msg.msg_flags &= ~MSG_MORE; + } + /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { @@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); - total_len += len; vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 8f99fe08de02..4269e621e254 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2239,11 +2239,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) __virtio16 avail_idx; int r; + if (vq->avail_idx != vq->last_avail_idx) + return false; + r = vhost_get_user(vq, avail_idx, &vq->avail->idx); - if (r) + if (unlikely(r)) return false; + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); - return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx; + return vq->avail_idx == vq->last_avail_idx; } EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
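
The hunks above repeat a handful of kernel-wide patterns; the short sketches that follow illustrate them in isolation, under stated assumptions. First, the rtlwifi logging rework: RT_TRACE(..., DBG_EMERG, ...) calls become plain pr_err(), and RT_ASSERT() becomes WARN_ONCE(). Note the condition inversion: RT_ASSERT(cond, ...) complained when cond was false, while WARN_ONCE(cond, ...) fires when cond is true, which is why RT_ASSERT((count < 100), ...) turns into WARN_ONCE((count >= 100), ...). A minimal user-space sketch of the two semantics (these macros are stand-ins for the kernel versions, which also dump a backtrace):

#include <stdio.h>

/* Stand-in for the old rtlwifi macro: warn when the asserted
 * condition does NOT hold. */
#define RT_ASSERT(cond, fmt, ...) \
        do { \
                if (!(cond)) \
                        fprintf(stderr, "assert failed: " fmt, ##__VA_ARGS__); \
        } while (0)

/* Stand-in for the kernel macro: warn (once) when the condition
 * DOES hold. */
#define WARN_ONCE(cond, fmt, ...) \
        do { \
                static int warned; \
                if ((cond) && !warned) { \
                        warned = 1; \
                        fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__); \
                } \
        } while (0)

int main(void)
{
        int count = 100;

        /* Old style: complain unless count stayed below the limit. */
        RT_ASSERT((count < 100), "Write wake up frame mask FAIL %d value!\n", count);
        /* New style: same event, inverted condition. */
        WARN_ONCE((count >= 100), "rtl8821ae: Write wake up frame mask FAIL %d value!\n", count);
        return 0;
}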
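
Relatedly, the sw.c and wifi.h hunks replace the single emergency-level debug module parameter with a debug_level plus a 64-bit debug_mask, both runtime-writable (0644), so verbosity and component selection are filtered independently. A hedged sketch of how such a level-and-mask gate is typically evaluated; the rtl_dbg helper and component bits below are illustrative, not the driver's actual API:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative component bits and module-parameter state. */
#define COMP_ERR  (1ULL << 0)
#define COMP_SEC  (1ULL << 1)
#define COMP_USB  (1ULL << 2)

static int debug_level = 2;               /* like debug_level=2 */
static uint64_t debug_mask = COMP_ERR;    /* like debug_mask=0x1 */

/* Print only if the message's level fits under debug_level AND its
 * component bit is set in debug_mask. */
static void rtl_dbg(uint64_t comp, int level, const char *fmt, ...)
{
        va_list ap;

        if (level > debug_level || !(comp & debug_mask))
                return;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        rtl_dbg(COMP_ERR, 1, "printed: level 1, COMP_ERR enabled\n");
        rtl_dbg(COMP_USB, 1, "suppressed: COMP_USB not in mask\n");
        rtl_dbg(COMP_ERR, 5, "suppressed: level 5 > debug_level 2\n");
        return 0;
}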
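
The wifi.h hunk also annotates SET_BITS_TO_LE_4BYTE/SET_BITS_TO_LE_2BYTE so the stored value is a proper __le32/__le16 produced by cpu_to_le32()/cpu_to_le16(), keeping sparse's endianness checking honest. The underlying operation, restated as a self-contained user-space demo that read-modify-writes a bitfield in a little-endian 32-bit word regardless of host byte order (the helper names here are mine, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian load/store on a raw byte buffer, standing in
 * for le32_to_cpu()/cpu_to_le32(). */
static uint32_t le32_load(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void le32_store(uint8_t *p, uint32_t v)
{
        p[0] = (uint8_t)v;
        p[1] = (uint8_t)(v >> 8);
        p[2] = (uint8_t)(v >> 16);
        p[3] = (uint8_t)(v >> 24);
}

/* Set 'bitlen' bits at 'bitoff' of the LE dword at 'p' to 'val' - the
 * read-modify-write that SET_BITS_TO_LE_4BYTE performs. */
static void set_bits_le32(uint8_t *p, int bitoff, int bitlen, uint32_t val)
{
        uint32_t mask = (bitlen == 32) ? 0xffffffffu : ((1u << bitlen) - 1);
        uint32_t v = le32_load(p);

        v &= ~(mask << bitoff);
        v |= (val & mask) << bitoff;
        le32_store(p, v);
}

int main(void)
{
        uint8_t desc[4] = { 0 };

        set_bits_le32(desc, 4, 8, 0xAB);        /* bits 4..11 := 0xAB */
        printf("%02x %02x %02x %02x\n", desc[0], desc[1], desc[2], desc[3]);
        /* prints: b0 0a 00 00 on any host endianness */
        return 0;
}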
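
Several drivers above (xen-netback, xen-netfront, octeon, visornic) switch from napi_complete(napi) to napi_complete_done(napi, work_done). Both end the poll, but the _done variant feeds the amount of work actually performed into the kernel's interrupt-moderation and busy-poll accounting. The usual poll-handler shape, sketched against the in-kernel NAPI API (example_poll is a hypothetical handler; this compiles only inside a kernel tree):

#include <linux/netdevice.h>

/* Typical NAPI poll handler: process up to 'budget' packets, and only
 * when the queue ran dry complete the poll via napi_complete_done(),
 * passing the real work_done count, then re-arm the RX interrupt. */
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... receive up to 'budget' frames, incrementing work_done ... */

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* re-enable the device's RX interrupt here */
        }
        return work_done;
}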
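
In the qeth hunks, checksum offload setup becomes a two-step START/ENABLE dialogue whose replies carry 'supported' and 'enabled' feature masks; the driver then insists that IP-header, UDP and TCP checksumming are all present and falls back to software checksumming (sending ASS_STOP and warning) otherwise. The mask test is plain set inclusion; a tiny demo using the bit values from the qeth_core_mpc.h hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit values as defined in the qeth_core_mpc.h hunk above. */
#define QETH_IPA_CHECKSUM_IP_HDR        0x0002
#define QETH_IPA_CHECKSUM_UDP           0x0008
#define QETH_IPA_CHECKSUM_TCP           0x0010

/* All three required bits must be present in the card's reply mask. */
static bool csum_features_ok(uint32_t mask)
{
        const uint32_t required = QETH_IPA_CHECKSUM_IP_HDR |
                                  QETH_IPA_CHECKSUM_UDP |
                                  QETH_IPA_CHECKSUM_TCP;

        return (required & mask) == required;
}

int main(void)
{
        printf("%d\n", csum_features_ok(0x001a));       /* 1: all required bits set */
        printf("%d\n", csum_features_ok(0x0012));       /* 0: UDP bit missing */
        return 0;
}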
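
The qeth_l3_sys.c hunks fix a subtle bug: the show routines used i both as the hash_for_each_safe() bucket index and as the running offset into buf, so the macro's bucket iteration and the snprintf() offset clobbered each other; the fix introduces a dedicated str_len and, since nothing is deleted during the walk, drops the _safe variant. The corrected accumulation pattern, as a standalone sketch (format_entries and the 42-byte worst case mirror the IPv6 entry_len in the hunk):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Accumulate newline-separated entries into buf using a dedicated
 * str_len offset - never a variable that something else (here, a loop
 * counter; in the driver, the hash-bucket index) also mutates. */
static int format_entries(char *buf, const char *const *entries, int n)
{
        int str_len = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (PAGE_SIZE - str_len <= 42)  /* worst-case entry length */
                        break;
                str_len += snprintf(buf + str_len, PAGE_SIZE - str_len,
                                    "%s\n", entries[i]);
        }
        str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
        return str_len;
}

int main(void)
{
        static char buf[PAGE_SIZE];
        const char *addrs[] = { "192.168.0.1", "10.0.0.2" };

        printf("%d bytes:\n%s", format_entries(buf, addrs, 2), buf);
        return 0;
}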
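
drivers/vhost/net.c factors its zero-copy back-pressure test into vhost_exceeds_maxpend(): upend_idx (producer) and done_idx (consumer) index a UIO_MAXIOV-entry circular buffer of in-flight DMA buffers, and the modular expression tests whether the producer, offset by the queue size and the pending window, has wrapped onto the consumer. A user-space rendition of just that arithmetic (VHOST_MAX_PEND here is illustrative; the comments stick to what the numbers do):

#include <stdbool.h>
#include <stdio.h>

#define UIO_MAXIOV      1024    /* ring size, as in the kernel */
#define VHOST_MAX_PEND  128     /* pending-DMA window, illustrative */

/* Mirrors vhost_exceeds_maxpend() from the hunk above: true when
 * (upend_idx + vq_num - VHOST_MAX_PEND) mod UIO_MAXIOV lands exactly
 * on done_idx. Because handle_tx() re-tests this before every
 * descriptor, equality is hit the moment the limit is reached. */
static bool exceeds_maxpend(unsigned int upend_idx, unsigned int done_idx,
                            unsigned int vq_num)
{
        return (upend_idx + vq_num - VHOST_MAX_PEND) % UIO_MAXIOV == done_idx;
}

int main(void)
{
        /* vq_num = 256: (898 + 256 - 128) % 1024 == 2, so this trips
         * with 896 entries in flight, leaving vq_num - VHOST_MAX_PEND
         * (= 128) slots of headroom in the 1024-entry ring. */
        printf("%d\n", exceeds_maxpend(898, 2, 256));   /* 1 */
        printf("%d\n", exceeds_maxpend(10, 2, 256));    /* 0 */
        return 0;
}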
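
Finally, the second vhost/net.c hunk moves the total_len accounting ahead of sendmsg() and sets MSG_MORE whenever more descriptors are available, the weight budget is not exhausted, and the pending window is not full - hinting to the underlying socket that further data follows so it can batch. MSG_MORE itself is ordinary socket API; a runnable user-space demonstration over loopback UDP, where two MSG_MORE-joined sends are delivered as a single datagram (error handling elided for brevity):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr = { .sin_family = AF_INET };
        socklen_t alen = sizeof(addr);
        char buf[64];
        int tx = socket(AF_INET, SOCK_DGRAM, 0);
        int rx = socket(AF_INET, SOCK_DGRAM, 0);

        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        bind(rx, (struct sockaddr *)&addr, sizeof(addr));   /* port 0: ephemeral */
        getsockname(rx, (struct sockaddr *)&addr, &alen);
        connect(tx, (struct sockaddr *)&addr, sizeof(addr));

        /* MSG_MORE: the kernel holds the datagram open for more data. */
        send(tx, "hello ", 6, MSG_MORE);
        send(tx, "world", 5, 0);        /* flushes one 11-byte datagram */

        ssize_t n = recv(rx, buf, sizeof(buf), 0);
        printf("got one datagram of %zd bytes\n", n);   /* 11 */
        close(tx);
        close(rx);
        return 0;
}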