Diffstat (limited to 'drivers/net/ethernet/ti/am65-cpsw-nuss.c')
-rw-r--r-- | drivers/net/ethernet/ti/am65-cpsw-nuss.c | 99
1 file changed, 46 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index cbe99017cbfa..ba6db61dd227 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t desc_dma;
 	dma_addr_t buf_dma;
-	void *swdata;
 
 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 	if (!desc_rx) {
@@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
 			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	*((void **)swdata) = page_address(page);
+	swdata->page = page;
+	swdata->flow_id = flow_idx;
 
 	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
 					desc_rx, desc_dma);
@@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
 
 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
 				      struct page *page,
-				      bool allow_direct,
-				      int desc_idx)
+				      bool allow_direct)
 {
 	page_pool_put_full_page(flow->page_pool, page, allow_direct);
-	flow->pages[desc_idx] = NULL;
 }
 
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
-	struct am65_cpsw_rx_flow *flow = data;
+	struct am65_cpsw_rx_chn *rx_chn = data;
 	struct cppi5_host_desc_t *desc_rx;
-	struct am65_cpsw_rx_chn *rx_chn;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t buf_dma;
+	struct page *page;
 	u32 buf_dma_len;
-	void *page_addr;
-	void **swdata;
-	int desc_idx;
+	u32 flow_id;
 
-	rx_chn = &flow->common->rx_chns;
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
+	page = swdata->page;
+	flow_id = swdata->flow_id;
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-	am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 				ret = -ENOMEM;
 				goto fail_rx;
 			}
-			flow->pages[i] = page;
 
 			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
 			if (ret < 0) {
 				dev_err(common->dev,
 					"cannot submit page to rx channel flow %d, error %d\n",
 					flow_idx, ret);
-				am65_cpsw_put_page(flow, page, false, i);
+				am65_cpsw_put_page(flow, page, false);
 				goto fail_rx;
 			}
 		}
@@ -763,9 +758,9 @@ fail_tx:
 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
 
 fail_rx:
-	for (i = 0; i < common->rx_ch_num_flows; i--)
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+	for (i = 0; i < common->rx_ch_num_flows; i++)
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	am65_cpsw_destroy_xdp_rxqs(common);
 
@@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 			dev_err(common->dev, "rx teardown timeout\n");
 	}
 
-	for (i = 0; i < common->rx_ch_num_flows; i++) {
+	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
 		napi_disable(&rx_chn->flows[i].napi_rx);
 		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 	}
 
 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -1028,7 +1023,7 @@ pool_free:
 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 			     struct am65_cpsw_port *port,
 			     struct xdp_buff *xdp,
-			     int desc_idx, int cpu, int *len)
+			     int cpu, int *len)
 {
 	struct am65_cpsw_common *common = flow->common;
 	struct am65_cpsw_ndev_priv *ndev_priv;
@@ -1101,7 +1096,7 @@ drop:
 	}
 
 	page = virt_to_head_page(xdp->data);
-	am65_cpsw_put_page(flow, page, true, desc_idx);
+	am65_cpsw_put_page(flow, page, true);
 
 out:
 	return ret;
@@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	struct am65_cpsw_ndev_stats *stats;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	struct page *page, *new_page;
 	dma_addr_t desc_dma, buf_dma;
 	struct am65_cpsw_port *port;
-	int headroom, desc_idx, ret;
 	struct net_device *ndev;
 	u32 flow_idx = flow->id;
 	struct sk_buff *skb;
 	struct xdp_buff xdp;
+	int headroom, ret;
 	void *page_addr;
-	void **swdata;
 	u32 *psdata;
 
 	*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		__func__, flow_idx, &desc_dma);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
-	page = virt_to_page(page_addr);
+	page = swdata->page;
+	page_addr = page_address(page);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-
 	skb = am65_cpsw_build_skb(page_addr, ndev,
 				  AM65_CPSW_MAX_PACKET_SIZE);
 	if (unlikely(!skb)) {
@@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
 					       cpu, &pkt_len);
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
@@ -1247,10 +1239,8 @@ allocate:
 		return -ENOMEM;
 	}
 
-	flow->pages[desc_idx] = new_page;
-
 	if (netif_dormant(ndev)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_dropped++;
 		return 0;
 	}
@@ -1258,7 +1248,7 @@ allocate:
 requeue:
 	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
 	if (WARN_ON(ret < 0)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
@@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 	for (i = 0; i < common->rx_ch_num_flows; i++) {
 		flow = &rx_chn->flows[i];
 		flow->page_pool = NULL;
-		flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
-					   sizeof(*flow->pages), GFP_KERNEL);
-		if (!flow->pages)
-			return -ENOMEM;
 	}
 
 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		flow = &rx_chn->flows[i];
 		flow->id = i;
 		flow->common = common;
+		flow->irq = -EINVAL;
 
 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
 		rx_flow_cfg.rx_cfg.size = max_desc_num;
-		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+		/* share same FDQ for all flows */
+		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
 		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
@@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		if (ret) {
 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
 				i, flow->irq, ret);
+			flow->irq = -EINVAL;
 			goto err;
 		}
 	}
@@ -2744,10 +2733,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
 		return 0;
 
 	/* alloc netdev */
-	port->ndev = devm_alloc_etherdev_mqs(common->dev,
-					     sizeof(struct am65_cpsw_ndev_priv),
-					     AM65_CPSW_MAX_QUEUES,
-					     AM65_CPSW_MAX_QUEUES);
+	port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
+					AM65_CPSW_MAX_QUEUES,
+					AM65_CPSW_MAX_QUEUES);
 	if (!port->ndev) {
 		dev_err(dev, "error allocating slave net_device %u\n",
 			port->port_id);
@@ -2868,8 +2856,12 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
 
 	for (i = 0; i < common->port_num; i++) {
 		port = &common->ports[i];
-		if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
+		if (!port->ndev)
+			continue;
+		if (port->ndev->reg_state == NETREG_REGISTERED)
 			unregister_netdev(port->ndev);
+		free_netdev(port->ndev);
+		port->ndev = NULL;
 	}
 }
 
@@ -3346,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 
 	for (i = 0; i < common->rx_ch_num_flows; i++)
 		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
-					  &rx_chan->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+					  rx_chan,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
 
@@ -3613,16 +3605,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 
 	ret = am65_cpsw_nuss_init_ndevs(common);
 	if (ret)
-		goto err_free_phylink;
+		goto err_ndevs_clear;
 
 	ret = am65_cpsw_nuss_register_ndevs(common);
 	if (ret)
-		goto err_free_phylink;
+		goto err_ndevs_clear;
 
 	pm_runtime_put(dev);
 	return 0;
 
-err_free_phylink:
+err_ndevs_clear:
+	am65_cpsw_nuss_cleanup_ndev(common);
 	am65_cpsw_nuss_phylink_cleanup(common);
 	am65_cpts_release(common->cpts);
 err_of_clear:
@@ -3652,13 +3645,13 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
 		return;
 	}
 
-	am65_cpsw_unregister_devlink(common);
 	am65_cpsw_unregister_notifiers(common);
 
 	/* must unregister ndevs here because DD release_driver routine calls
 	 * dma_deconfigure(dev) before devres_release_all(dev)
 	 */
 	am65_cpsw_nuss_cleanup_ndev(common);
+	am65_cpsw_unregister_devlink(common);
 	am65_cpsw_nuss_phylink_cleanup(common);
 	am65_cpts_release(common->cpts);
 	am65_cpsw_disable_serdes_phy(common);
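
Note: the patch stops stashing a raw page address in the descriptor's software data and instead stores a small struct carrying both the buffer page and the RX flow it was pushed to, which is what lets am65_cpsw_nuss_rx_cleanup() take the channel (rather than a single flow) as its context and drop the per-flow pages[] bookkeeping array. The struct itself is defined in am65-cpsw-nuss.h, which is not part of this diff, so the sketch below is an assumption based only on the two fields this patch accesses (swdata->page and swdata->flow_id):

struct am65_cpsw_swdata {
	u32 flow_id;		/* RX flow the buffer was pushed to */
	struct page *page;	/* page_pool-backed receive buffer */
};

Relatedly, since all flows now share one free-descriptor queue (rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num), the reset paths pass !!i as the last k3_udma_glue_reset_rx_chn() argument, which appears to skip the shared-FDQ teardown for every flow except flow 0 so the queue is drained exactly once.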