author     Jiawen Wu <jiawenwu@trustnetic.com>       2023-02-03 17:11:34 +0800
committer  David S. Miller <davem@davemloft.net>     2023-02-06 09:22:48 +0000
commit     0d22be525a619d03a603e70da14b8af006d0e58a (patch)
tree       e23106a8b18761f7db052830869b613c8e0ec016 /drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
parent     09a508800952f8d4b04bf27fd3fe9d38039cf06e (diff)
net: txgbe: Support Rx and Tx process path
Clean Rx and Tx ring interrupts, process packets in the data path.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
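For context, the "clean Rx and Tx ring interrupts, process packets" work described above is driven from the standard NAPI poll callback; the actual helpers for this hardware live in the shared libwx code rather than in txgbe_main.c. The following is only a minimal sketch of that generic pattern, under that assumption; every my_* name is a hypothetical placeholder, not one of the driver's real functions.

/*
 * Minimal sketch of the generic NAPI poll pattern that "clean Rx and Tx
 * ring interrupts, process packets in the data path" refers to.  All
 * my_* names are hypothetical placeholders; the real helpers for this
 * driver live in the shared libwx code, not in txgbe_main.c.
 */
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
	/* Tx/Rx ring state would live here */
};

/* hypothetical stub: reclaim completed Tx descriptors, true if ring is clean */
static bool my_clean_tx_irq(struct my_q_vector *q_vector)
{
	return true;
}

/* hypothetical stub: receive packets, return how many were processed */
static int my_clean_rx_irq(struct my_q_vector *q_vector, int budget)
{
	return 0;
}

/* hypothetical stub: unmask this queue's interrupt */
static void my_enable_queue_irq(struct my_q_vector *q_vector)
{
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_q_vector *q_vector = container_of(napi, struct my_q_vector, napi);
	bool clean_complete;
	int work_done;

	/* reclaim completed Tx descriptors first */
	clean_complete = my_clean_tx_irq(q_vector);

	/* then receive at most one budget's worth of Rx packets */
	work_done = my_clean_rx_irq(q_vector, budget);
	if (work_done >= budget)
		clean_complete = false;

	/* more work pending: stay in polling mode, interrupts stay masked */
	if (!clean_complete)
		return budget;

	/* all rings clean: leave polling and re-enable the queue interrupt */
	if (napi_complete_done(napi, work_done))
		my_enable_queue_irq(q_vector);

	return work_done;
}

The hunks below fit the same life cycle: txgbe_up_complete() enables NAPI before clearing pending interrupts and only then starts the Tx queues, while txgbe_disable_device() stops the queues and disables NAPI before txgbe_down() cleans the rings.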
Diffstat (limited to 'drivers/net/ethernet/wangxun/txgbe/txgbe_main.c')
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c  37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b50acb09699..094df377726b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -223,6 +223,10 @@ static void txgbe_up_complete(struct wx *wx)
 	wx_control_hw(wx, true);
 	wx_configure_vectors(wx);
 
+	/* make sure to complete pre-operations */
+	smp_mb__before_atomic();
+	wx_napi_enable_all(wx);
+
 	/* clear any pending interrupts, may auto mask */
 	rd32(wx, WX_PX_IC);
 	rd32(wx, WX_PX_MISC_IC);
@@ -236,6 +240,10 @@ static void txgbe_up_complete(struct wx *wx)
 	wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
 	reg = rd32(wx, WX_MAC_TX_CFG);
 	wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(wx->netdev);
+	netif_carrier_on(wx->netdev);
 }
 
 static void txgbe_reset(struct wx *wx)
@@ -268,10 +276,12 @@ static void txgbe_disable_device(struct wx *wx)
 		/* this call also flushes the previous write */
 		wx_disable_rx_queue(wx, wx->rx_ring[i]);
 
+	netif_tx_stop_all_queues(netdev);
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
 	wx_irq_disable(wx);
+	wx_napi_disable_all(wx);
 
 	if (wx->bus.func < 2)
 		wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
@@ -300,6 +310,9 @@ static void txgbe_down(struct wx *wx)
 {
 	txgbe_disable_device(wx);
 	txgbe_reset(wx);
+
+	wx_clean_all_tx_rings(wx);
+	wx_clean_all_rx_rings(wx);
 }
 
 /**
@@ -381,10 +394,21 @@ static int txgbe_open(struct net_device *netdev)
 	if (err)
 		goto err_free_isb;
 
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+	if (err)
+		goto err_free_irq;
+
+	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+	if (err)
+		goto err_free_irq;
+
 	txgbe_up_complete(wx);
 
 	return 0;
 
+err_free_irq:
+	wx_free_irq(wx);
 err_free_isb:
 	wx_free_isb_resources(wx);
 err_reset:
@@ -403,8 +427,6 @@ err_reset:
 static void txgbe_close_suspend(struct wx *wx)
 {
 	txgbe_disable_device(wx);
-
-	wx_free_irq(wx);
 	wx_free_resources(wx);
 }
 
@@ -461,19 +483,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
 	}
 }
 
-static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
-				    struct net_device *netdev)
-{
-	return NETDEV_TX_OK;
-}
-
 static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_open               = txgbe_open,
 	.ndo_stop               = txgbe_close,
-	.ndo_start_xmit         = txgbe_xmit_frame,
+	.ndo_start_xmit         = wx_xmit_frame,
 	.ndo_set_rx_mode        = wx_set_rx_mode,
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_set_mac_address    = wx_set_mac,
+	.ndo_get_stats64        = wx_get_stats64,
 };
 
 /**
@@ -647,6 +664,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, wx);
 
+	netif_tx_stop_all_queues(netdev);
+
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
 	 * bandwidth due to being older generation PCIe parts. We clamp these