Diffstat (limited to 'drivers/net/ethernet/wangxun')
27 files changed, 4744 insertions, 936 deletions
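Two themes run through this series: the libwx helpers stop taking a separate struct wx_hw and instead operate on the driver-private struct wx directly, and shared Rx/Tx ring setup (wx_lib.c) plus common ethtool handlers (wx_ethtool.c) move into libwx so ngbe and txgbe can reuse them. As a minimal sketch of how a consumer driver might wire up the new shared wx_get_drvinfo handler: the example_* names below are hypothetical, and it assumes struct wx occupies the netdev_priv() area with driver_name and eeprom_id populated before registration, which is what the helper added in wx_ethtool.c expects.

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	#include "../libwx/wx_type.h"
	#include "../libwx/wx_ethtool.h"

	/* Hypothetical consumer wiring: the shared libwx handler fills in
	 * the driver name, firmware version and bus info from struct wx,
	 * so it can be referenced directly from an ethtool_ops table.
	 */
	static const struct ethtool_ops example_ethtool_ops = {
		.get_drvinfo	= wx_get_drvinfo,
	};

	static void example_set_ethtool_ops(struct net_device *netdev)
	{
		netdev->ethtool_ops = &example_ethtool_ops;
	}

Keeping the handler in libwx means both PCI drivers report drvinfo the same way; each consumer only differs in the driver_name and EEPROM id strings it stores in struct wx.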
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 86310588c6c1..c9d88673d306 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN config LIBWX tristate + select PAGE_POOL help Common library for Wangxun(R) Ethernet drivers. @@ -25,6 +26,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX + select PHYLIB help This driver supports Wangxun(R) GbE PCI Express family of adapters. diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile index 1ed5e23af944..42ccd6e4052e 100644 --- a/drivers/net/ethernet/wangxun/libwx/Makefile +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_LIBWX) += libwx.o -libwx-objs := wx_hw.o +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c new file mode 100644 index 000000000000..93cb6f2294e7 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include <linux/phy.h> + +#include "wx_type.h" +#include "wx_ethtool.h" + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) +{ + struct wx *wx = netdev_priv(netdev); + + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); + strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); +} +EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h new file mode 100644 index 000000000000..e85538c69454 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_ETHTOOL_H_ +#define _WX_ETHTOOL_H_ + +void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +#endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index c57dc3238b3f..7db57f934a91 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -2,59 +2,100 @@ /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ #include <linux/etherdevice.h> +#include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/iopoll.h> #include <linux/pci.h> #include "wx_type.h" +#include "wx_lib.h" #include "wx_hw.h" -static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask) +static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; - mask = (qmask & 0xFFFFFFFF); + mask = (qmask & U32_MAX); if (mask) - wr32(wxhw, WX_PX_IMS(0), mask); + wr32(wx, WX_PX_IMS(0), mask); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { mask = (qmask >> 32); if (mask) - wr32(wxhw, WX_PX_IMS(1), mask); + wr32(wx, WX_PX_IMS(1), mask); } } +void wx_intr_enable(struct wx *wx, u64 qmask) +{ + u32 mask; + + mask = (qmask & U32_MAX); + if (mask) + wr32(wx, WX_PX_IMC(0), mask); + if (wx->mac.type == wx_mac_sp) { + mask = (qmask >> 32); + if (mask) + wr32(wx, WX_PX_IMC(1), mask); + } +} +EXPORT_SYMBOL(wx_intr_enable); + +/** + * wx_irq_disable - Mask off interrupt generation on the NIC + * @wx: board private structure + **/ +void wx_irq_disable(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wr32(wx, WX_PX_MISC_IEN, 0); + wx_intr_disable(wx, WX_INTR_ALL); + + if (pdev->msix_enabled) { + int vector; + + for (vector = 0; vector < wx->num_q_vectors; vector++) + synchronize_irq(wx->msix_entries[vector].vector); + + synchronize_irq(wx->msix_entries[vector].vector); + } else { + synchronize_irq(pdev->irq); + } +} +EXPORT_SYMBOL(wx_irq_disable); + /* cmd_addr is used for some special command: * 1. to be sector address, when implemented erase sector command * 2. to be flash address when implemented read, write flash address */ -static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr) +static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr) { u32 cmd_val = 0, val = 0; cmd_val = WX_SPI_CMD_CMD(cmd) | WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | cmd_addr; - wr32(wxhw, WX_SPI_CMD, cmd_val); + wr32(wx, WX_SPI_CMD, cmd_val); return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, - false, wxhw, WX_SPI_STATUS); + false, wx, WX_SPI_STATUS); } -static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data) +static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data) { int ret = 0; - ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr); + ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr); if (ret < 0) return ret; - *data = rd32(wxhw, WX_SPI_DATA); + *data = rd32(wx, WX_SPI_DATA); return ret; } -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) +int wx_check_flash_load(struct wx *hw, u32 check_bit) { u32 reg = 0; int err = 0; @@ -73,29 +114,25 @@ int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) } EXPORT_SYMBOL(wx_check_flash_load); -void wx_control_hw(struct wx_hw *wxhw, bool drv) +void wx_control_hw(struct wx *wx, bool drv) { - if (drv) { - /* Let firmware know the driver has taken over */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD); - } else { - /* Let firmware take over control of hw */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, 0); - } + /* True : Let firmware know the driver has taken over + * False : Let firmware take over control of hw + */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD, + drv ? 
WX_CFG_PORT_CTL_DRV_LOAD : 0); } EXPORT_SYMBOL(wx_control_hw); /** * wx_mng_present - returns 0 when management capability is present - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure */ -int wx_mng_present(struct wx_hw *wxhw) +int wx_mng_present(struct wx *wx) { u32 fwsm; - fwsm = rd32(wxhw, WX_MIS_ST); + fwsm = rd32(wx, WX_MIS_ST); if (fwsm & WX_MIS_ST_MNG_INIT_DN) return 0; else @@ -108,40 +145,40 @@ static DEFINE_MUTEX(wx_sw_sync_lock); /** * wx_release_sw_sync - Release SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask) +static void wx_release_sw_sync(struct wx *wx, u32 mask) { mutex_lock(&wx_sw_sync_lock); - wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0); + wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0); mutex_unlock(&wx_sw_sync_lock); } /** * wx_acquire_sw_sync - Acquire SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) +static int wx_acquire_sw_sync(struct wx *wx, u32 mask) { u32 sem = 0; int ret = 0; mutex_lock(&wx_sw_sync_lock); ret = read_poll_timeout(rd32, sem, !(sem & mask), - 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC); + 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC); if (!ret) { sem |= mask; - wr32(wxhw, WX_MNG_SWFW_SYNC, sem); + wr32(wx, WX_MNG_SWFW_SYNC, sem); } else { - wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem); + wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem); } mutex_unlock(&wx_sw_sync_lock); @@ -150,7 +187,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) /** * wx_host_interface_command - Issue command to manageability block - * @wxhw: pointer to the HW structure + * @wx: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes @@ -162,7 +199,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) * So we will leave this up to the caller to read back the data * in these cases. **/ -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct wx_hic_hdr); @@ -172,17 +209,17 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, u16 buf_len; if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length); + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); return -EINVAL; } - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); if (status != 0) return status; /* Calculate length in DWORDs. We must be DWORD aligned */ if ((length % (sizeof(u32))) != 0) { - wx_err(wxhw, "Buffer length failure, not aligned to dword"); + wx_err(wx, "Buffer length failure, not aligned to dword"); status = -EINVAL; goto rel_out; } @@ -193,38 +230,38 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, * into the ram area. 
*/ for (i = 0; i < dword_len; i++) { - wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); /* write flush */ - buf[i] = rd32a(wxhw, WX_MNG_MBOX, i); + buf[i] = rd32a(wx, WX_MNG_MBOX, i); } /* Setting this bit tells the ARC that a new command is pending. */ - wr32m(wxhw, WX_MNG_MBOX_CTL, + wr32m(wx, WX_MNG_MBOX_CTL, WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY); status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, - timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL); + timeout * 1000, false, wx, WX_MNG_MBOX_CTL); /* Check command completion */ if (status) { - wx_dbg(wxhw, "Command has failed with no status valid.\n"); + wx_dbg(wx, "Command has failed with no status valid.\n"); - buf[0] = rd32(wxhw, WX_MNG_MBOX); + buf[0] = rd32(wx, WX_MNG_MBOX); if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { status = -EINVAL; goto rel_out; } if ((buf[0] & 0xff0000) >> 16 == 0x80) { - wx_dbg(wxhw, "It's unknown cmd.\n"); + wx_dbg(wx, "It's unknown cmd.\n"); status = -EINVAL; goto rel_out; } - wx_dbg(wxhw, "write value:\n"); + wx_dbg(wx, "write value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buffer[i]); - wx_dbg(wxhw, "read value:\n"); + wx_dbg(wx, "%x ", buffer[i]); + wx_dbg(wx, "read value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buf[i]); + wx_dbg(wx, "%x ", buf[i]); } if (!return_data) @@ -235,7 +272,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } @@ -245,7 +282,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, goto rel_out; if (length < buf_len + hdr_size) { - wx_err(wxhw, "Buffer not large enough for reply message.\n"); + wx_err(wx, "Buffer not large enough for reply message.\n"); status = -EFAULT; goto rel_out; } @@ -255,12 +292,12 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } rel_out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); return status; } EXPORT_SYMBOL(wx_host_interface_command); @@ -268,13 +305,13 @@ EXPORT_SYMBOL(wx_host_interface_command); /** * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd * assuming that the semaphore is already obtained. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. 
**/ -static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) +static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) { struct wx_hic_read_shadow_ram buffer; int status; @@ -289,33 +326,33 @@ static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) /* one word */ buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer), + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) return status; - *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); return status; } /** * wx_read_ee_hostif - Read EEPROM word using a host interface cmd - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. **/ -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data) +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data) { int status = 0; - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status == 0) { - status = wx_read_ee_hostif_data(wxhw, offset, data); - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_read_ee_hostif_data(wx, offset, data); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); } return status; @@ -324,14 +361,14 @@ EXPORT_SYMBOL(wx_read_ee_hostif); /** * wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the hostif. **/ -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data) { struct wx_hic_read_shadow_ram buffer; @@ -342,7 +379,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, u32 i; /* Take semaphore for the entire operation. 
*/ - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status != 0) return status; @@ -361,20 +398,20 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) { - wx_err(wxhw, "Host interface command failed\n"); + wx_err(wx, "Host interface command failed\n"); goto out; } for (i = 0; i < words_to_read; i++) { u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; - value = rd32(wxhw, reg); + value = rd32(wx, reg); data[current_word] = (u16)(value & 0xffff); current_word++; i++; @@ -388,7 +425,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, } out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); return status; } EXPORT_SYMBOL(wx_read_ee_hostif_buffer); @@ -416,12 +453,12 @@ static u8 wx_calculate_checksum(u8 *buffer, u32 length) /** * wx_reset_hostif - send reset cmd to fw - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sends reset cmd to firmware through the manageability * block. **/ -int wx_reset_hostif(struct wx_hw *wxhw) +int wx_reset_hostif(struct wx *wx) { struct wx_hic_reset reset_cmd; int ret_val = 0; @@ -430,15 +467,15 @@ int wx_reset_hostif(struct wx_hw *wxhw) reset_cmd.hdr.cmd = FW_RESET_CMD; reset_cmd.hdr.buf_len = FW_RESET_LEN; reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - reset_cmd.lan_id = wxhw->bus.func; - reset_cmd.reset_type = (u16)wxhw->reset_type; + reset_cmd.lan_id = wx->bus.func; + reset_cmd.reset_type = (u16)wx->reset_type; reset_cmd.hdr.checksum = 0; reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd, (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd, + ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd, sizeof(reset_cmd), WX_HI_COMMAND_TIMEOUT, true); @@ -460,14 +497,14 @@ EXPORT_SYMBOL(wx_reset_hostif); /** * wx_init_eeprom_params - Initialize EEPROM params - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Initializes the EEPROM parameters wx_eeprom_info within the * wx_hw struct in order to set up EEPROM access. 
**/ -void wx_init_eeprom_params(struct wx_hw *wxhw) +void wx_init_eeprom_params(struct wx *wx) { - struct wx_eeprom_info *eeprom = &wxhw->eeprom; + struct wx_eeprom_info *eeprom = &wx->eeprom; u16 eeprom_size; u16 data = 0x80; @@ -475,21 +512,21 @@ void wx_init_eeprom_params(struct wx_hw *wxhw) eeprom->semaphore_delay = 10; eeprom->type = wx_eeprom_none; - if (!(rd32(wxhw, WX_SPI_STATUS) & + if (!(rd32(wx, WX_SPI_STATUS) & WX_SPI_STATUS_FLASH_BYPASS)) { eeprom->type = wx_flash; eeprom_size = 4096; eeprom->word_size = eeprom_size >> 1; - wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n", + wx_dbg(wx, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } } - if (wxhw->mac.type == wx_mac_sp) { - if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) { - wx_err(wxhw, "NVM Read Error\n"); + if (wx->mac.type == wx_mac_sp) { + if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { + wx_err(wx, "NVM Read Error\n"); return; } data = data >> 1; @@ -501,22 +538,22 @@ EXPORT_SYMBOL(wx_init_eeprom_params); /** * wx_get_mac_addr - Generic get MAC address - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from first Receive Address Register (RAR0) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr) +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr) { u32 rar_high; u32 rar_low; u16 i; - wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0); - rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H); - rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L); + wr32(wx, WX_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H); + rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L); for (i = 0; i < 2; i++) mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); @@ -528,7 +565,7 @@ EXPORT_SYMBOL(wx_get_mac_addr); /** * wx_set_rar - Set Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @pools: VMDq "set" or "pool" index @@ -536,25 +573,25 @@ EXPORT_SYMBOL(wx_get_mac_addr); * * Puts an ethernet address into a receive address register. 
**/ -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, - u32 enable_addr) +static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, + u32 enable_addr) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 rar_low, rar_high; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } /* select the MAC address */ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); /* setup VMDq pool mapping */ - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wxhw->mac.type == wx_mac_sp) - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32); + wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + if (wx->mac.type == wx_mac_sp) + wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -572,31 +609,30 @@ int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, if (enable_addr != 0) rar_high |= WX_PSR_MAC_SWC_AD_H_AV; - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), rar_high); return 0; } -EXPORT_SYMBOL(wx_set_rar); /** * wx_clear_rar - Remove Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. **/ -int wx_clear_rar(struct wx_hw *wxhw, u32 index) +static int wx_clear_rar(struct wx *wx, u32 index) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } @@ -604,78 +640,77 @@ int wx_clear_rar(struct wx_hw *wxhw, u32 index) * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. 
*/ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_L, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_H, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), 0); return 0; } -EXPORT_SYMBOL(wx_clear_rar); /** * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address - * @wxhw: pointer to hardware struct + * @wx: pointer to hardware struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ -static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq) +static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 mpsar_lo, mpsar_hi; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", rar); + wx_err(wx, "RAR index %d is out of range.\n", rar); return -EINVAL; } - wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar); - mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L); - mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H); + wr32(wx, WX_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H); if (!mpsar_lo && !mpsar_hi) return 0; /* was that the last pool using this rar? */ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - wx_clear_rar(wxhw, rar); + wx_clear_rar(wx, rar); return 0; } /** * wx_init_uta_tables - Initialize the Unicast Table Array - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure **/ -static void wx_init_uta_tables(struct wx_hw *wxhw) +static void wx_init_uta_tables(struct wx *wx) { int i; - wx_dbg(wxhw, " Clearing UTA\n"); + wx_dbg(wx, " Clearing UTA\n"); for (i = 0; i < 128; i++) - wr32(wxhw, WX_PSR_UC_TBL(i), 0); + wr32(wx, WX_PSR_UC_TBL(i), 0); } /** * wx_init_rx_addrs - Initializes receive address filters. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ -void wx_init_rx_addrs(struct wx_hw *wxhw) +void wx_init_rx_addrs(struct wx *wx) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 psrctl; int i; @@ -683,97 +718,829 @@ void wx_init_rx_addrs(struct wx_hw *wxhw) * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (!is_valid_ether_addr(wxhw->mac.addr)) { + if (!is_valid_ether_addr(wx->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ - wx_get_mac_addr(wxhw, wxhw->mac.addr); - wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr); + wx_get_mac_addr(wx, wx->mac.addr); + wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr); } else { /* Setup the receive address. 
*/ - wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n"); - wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr); + wx_dbg(wx, "Overriding MAC Address in RAR[0]\n"); + wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr); - wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); + wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { /* clear VMDq pool/queue selection for RAR 0 */ - wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL); + wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); } } /* Zero out the other receive addresses. */ - wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1); + wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { - wr32(wxhw, WX_PSR_MAC_SWC_IDX, i); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0); + wr32(wx, WX_PSR_MAC_SWC_IDX, i); + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32(wx, WX_PSR_MAC_SWC_AD_H, 0); } /* Clear the MTA */ - wxhw->addr_ctrl.mta_in_use = 0; - psrctl = rd32(wxhw, WX_PSR_CTL); + wx->addr_ctrl.mta_in_use = 0; + psrctl = rd32(wx, WX_PSR_CTL); psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); - psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; - wr32(wxhw, WX_PSR_CTL, psrctl); - wx_dbg(wxhw, " Clearing MTA\n"); - for (i = 0; i < wxhw->mac.mcft_size; i++) - wr32(wxhw, WX_PSR_MC_TBL(i), 0); + psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; + wr32(wx, WX_PSR_CTL, psrctl); + wx_dbg(wx, " Clearing MTA\n"); + for (i = 0; i < wx->mac.mcft_size; i++) + wr32(wx, WX_PSR_MC_TBL(i), 0); - wx_init_uta_tables(wxhw); + wx_init_uta_tables(wx); } EXPORT_SYMBOL(wx_init_rx_addrs); -void wx_disable_rx(struct wx_hw *wxhw) +static void wx_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } + } +} + +/* this function destroys the first RAR entry */ +void wx_mac_set_default_filter(struct wx *wx, u8 *addr) +{ + memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); + wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); + wx_set_rar(wx, 0, wx->mac_table[0].addr, + wx->mac_table[0].pools, + WX_PSR_MAC_SWC_AD_H_AV); +} +EXPORT_SYMBOL(wx_mac_set_default_filter); + +void wx_flush_sw_mac_table(struct wx *wx) +{ + u32 i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + wx->mac_table[i].pools = 0; + } + wx_sync_mac_table(wx); +} +EXPORT_SYMBOL(wx_flush_sw_mac_table); + +static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, wx->mac_table[i].addr)) { + if (wx->mac_table[i].pools != (1ULL << pool)) { + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + } + } + + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) + continue; + wx->mac_table[i].state |= 
(WX_MAC_STATE_MODIFIED | + WX_MAC_STATE_IN_USE); + memcpy(wx->mac_table[i].addr, addr, ETH_ALEN); + wx->mac_table[i].pools |= (1ULL << pool); + wx_sync_mac_table(wx); + return i; + } + return -ENOMEM; +} + +static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* search table for addr, if found, set to 0 and sync */ + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!ether_addr_equal(addr, wx->mac_table[i].addr)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].pools &= ~(1ULL << pool); + if (!wx->mac_table[i].pools) { + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + } + wx_sync_mac_table(wx); + return 0; + } + return -ENOMEM; +} + +static int wx_available_rars(struct wx *wx) +{ + u32 i, count = 0; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state == 0) + count++; + } + + return count; +} + +/** + * wx_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * @pool: index for mac table + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int wx_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct wx *wx = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > wx_available_rars(wx)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + wx_del_mac_filter(wx, ha->addr, pool); + wx_add_mac_filter(wx, ha->addr, pool); + count++; + } + } + return count; +} + +/** + * wx_mta_vector - Determines bit-vector in multicast table to set + * @wx: pointer to private structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr) +{ + u32 vector = 0; + + switch (wx->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + wx_err(wx, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * wx_set_mta - Set bit-vector in multicast table + * @wx: pointer to private structure + * @mc_addr: Multicast address + * + * Sets the bit-vector in the multicast table. 
+ **/ +static void wx_set_mta(struct wx *wx, u8 *mc_addr) +{ + u32 vector, vector_bit, vector_reg; + + wx->addr_ctrl.mta_in_use++; + + vector = wx_mta_vector(wx, mc_addr); + wx_dbg(wx, " bit-vector = 0x%03X\n", vector); + + /* The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * wx_update_mc_addr_list - Updates MAC list of multicast addresses + * @wx: pointer to private structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i, psrctl; + + /* Set the new number of MC addresses that we are being requested to + * use. + */ + wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + wx->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + wx_dbg(wx, " Clearing MTA\n"); + memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow)); + + /* Update mta_shadow */ + netdev_for_each_mc_addr(ha, netdev) { + wx_dbg(wx, " Adding the multicast addresses:\n"); + wx_set_mta(wx, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < wx->mac.mcft_size; i++) + wr32a(wx, WX_PSR_MC_TBL(0), i, + wx->mac.mta_shadow[i]); + + if (wx->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); + psrctl |= WX_PSR_CTL_MFE | + (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT); + wr32(wx, WX_PSR_CTL, psrctl); + } + + wx_dbg(wx, "Update mc addr list Complete\n"); +} + +/** + * wx_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
+ * Returns: 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int wx_write_mc_addr_list(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return 0; + + wx_update_mc_addr_list(wx, netdev); + + return netdev_mc_count(netdev); +} + +/** + * wx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +int wx_set_mac(struct net_device *netdev, void *p) +{ + struct wx *wx = netdev_priv(netdev); + struct sockaddr *addr = p; + int retval; + + retval = eth_prepare_mac_addr_change(netdev, addr); + if (retval) + return retval; + + wx_del_mac_filter(wx, wx->mac.addr, 0); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); + + wx_mac_set_default_filter(wx, wx->mac.addr); + + return 0; +} +EXPORT_SYMBOL(wx_set_mac); + +void wx_disable_rx(struct wx *wx) { u32 pfdtxgswc; u32 rxctrl; - rxctrl = rd32(wxhw, WX_RDB_PB_CTL); + rxctrl = rd32(wx, WX_RDB_PB_CTL); if (rxctrl & WX_RDB_PB_CTL_RXEN) { - pfdtxgswc = rd32(wxhw, WX_PSR_CTL); + pfdtxgswc = rd32(wx, WX_PSR_CTL); if (pfdtxgswc & WX_PSR_CTL_SW_EN) { pfdtxgswc &= ~WX_PSR_CTL_SW_EN; - wr32(wxhw, WX_PSR_CTL, pfdtxgswc); - wxhw->mac.set_lben = true; + wr32(wx, WX_PSR_CTL, pfdtxgswc); + wx->mac.set_lben = true; } else { - wxhw->mac.set_lben = false; + wx->mac.set_lben = false; } rxctrl &= ~WX_RDB_PB_CTL_RXEN; - wr32(wxhw, WX_RDB_PB_CTL, rxctrl); + wr32(wx, WX_RDB_PB_CTL, rxctrl); - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac receiver */ - wr32m(wxhw, WX_MAC_RX_CFG, + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); } } } EXPORT_SYMBOL(wx_disable_rx); +static void wx_enable_rx(struct wx *wx) +{ + u32 psrctl; + + /* enable mac receiver */ + wr32m(wx, WX_MAC_RX_CFG, + WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + wr32m(wx, WX_RDB_PB_CTL, + WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN); + + if (wx->mac.set_lben) { + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_SW_EN; + wr32(wx, WX_PSR_CTL, psrctl); + wx->mac.set_lben = false; + } +} + +/** + * wx_set_rxpba - Initialize Rx packet buffer + * @wx: pointer to private structure + **/ +static void wx_set_rxpba(struct wx *wx) +{ + u32 rxpktsize, txpktsize, txpbthresh; + + rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = wx->mac.tx_pb_size; + txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX; + wr32(wx, WX_TDB_PB_SZ(0), txpktsize); + wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); +} + +static void wx_configure_port(struct wx *wx) +{ + u32 value, i; + + value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_D_VLAN | + WX_CFG_PORT_CTL_QINQ, + value); + + wr32(wx, WX_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + wx->tpid[0] = ETH_P_8021Q; + wx->tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(wx, WX_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + wx->tpid[i] = ETH_P_8021Q; +} + +/** + * wx_disable_sec_rx_path - Stops the receive data path + * @wx: pointer to private structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +static int wx_disable_sec_rx_path(struct wx *wx) +{ + u32 secrx; + + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS); + + return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, + 1000, 40000, false, wx, WX_RSC_ST); +} + +/** + * wx_enable_sec_rx_path - Enables the receive data path + * @wx: pointer to private structure + * + * Enables the receive data path. + **/ +static void wx_enable_sec_rx_path(struct wx *wx) +{ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +void wx_set_rx_mode(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32(wx, WX_PSR_CTL); + fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); + vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr &= ~(WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_MPE | + WX_PSR_VM_L2CTL_ROPE | + WX_PSR_VM_L2CTL_ROMPE); + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN); + + /* set all bits that we expect to always be set */ + fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE; + vmolr |= WX_PSR_VM_L2CTL_BAM | + WX_PSR_VM_L2CTL_AUPE | + WX_PSR_VM_L2CTL_VACC; + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + + wx->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + wx->addr_ctrl.user_set_promisc = true; + fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= WX_PSR_VM_L2CTL_MPE; + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= WX_PSR_CTL_MPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE); + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(wx, WX_RSC_CTL, + WX_RSC_CTL_SAVE_MAC_ERR, + WX_RSC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE; + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = wx_write_uc_addr_list(netdev, 0); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROPE; + vmolr |= WX_PSR_VM_L2CTL_UPE; + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = wx_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; + vmolr |= WX_PSR_VM_L2CTL_MPE; + } + + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + wr32(wx, WX_PSR_CTL, fctrl); + wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); +} 
+EXPORT_SYMBOL(wx_set_rx_mode); + +static void wx_set_rx_buffer_len(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 mhadd, max_frame; + + max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(wx, WX_PSR_MAX_SZ); + if (max_frame != mhadd) + wr32(wx, WX_PSR_MAX_SZ, max_frame); +} + +/* Disable the specified rx queue */ +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + /* write value back with RRCFG.EN bit cleared */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN), + 10, 100, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not cleared within the polling period\n", + reg_idx); + } +} +EXPORT_SYMBOL(wx_disable_rx_queue); + +static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rxdctl; + int ret; + + ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN, + 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx)); + + if (ret == -ETIMEDOUT) { + /* Just for information */ + wx_err(wx, + "RRCFG.EN on Rx queue %d not set within the polling period\n", + reg_idx); + } +} + +static void wx_configure_srrctl(struct wx *wx, + struct wx_ring *rx_ring) +{ + u16 reg_idx = rx_ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ | + WX_PX_RR_CFG_RR_BUF_SZ | + WX_PX_RR_CFG_SPLIT_MODE); + /* configure header buffer length, needed for RSC */ + srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_configure_tx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u32 txdctl = WX_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + int ret; + + /* disable queue to avoid issues while updating state */ + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + WX_WRITE_FLUSH(wx); + + wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba)); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_TR_RP(reg_idx), 0); + wr32(wx, WX_PX_TR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx); + + if (ring->count < WX_MAX_TXD) + txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; + txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct wx_tx_buffer) * ring->count); + + /* enable queue */ + wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE, + 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx)); + if (ret == -ETIMEDOUT) + wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void wx_configure_rx_ring(struct wx *wx, + struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + union wx_rx_desc *rx_desc; + u64 rdba = ring->dma; + u32 rxdctl; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + wx_disable_rx_queue(wx, 
ring); + + wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba)); + + if (ring->count == WX_MAX_RXD) + rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; + wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(wx, WX_PX_RR_RP(reg_idx), 0); + wr32(wx, WX_PX_RR_WP(reg_idx), 0); + ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); + + wx_configure_srrctl(wx, ring); + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct wx_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = WX_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor ring */ + wr32m(wx, WX_PX_RR_CFG(reg_idx), + WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN); + + wx_enable_rx_queue(wx, ring); + wx_alloc_rx_buffers(ring, wx_desc_unused(ring)); +} + +/** + * wx_configure_tx - Configure Transmit Unit after Reset + * @wx: pointer to private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void wx_configure_tx(struct wx *wx) +{ + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(wx, WX_TDM_CTL, + WX_TDM_CTL_TE, WX_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < wx->num_tx_queues; i++) + wx_configure_tx_ring(wx, wx->tx_ring[i]); + + wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10); + + if (wx->mac.type == wx_mac_em) + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1); + + /* enable mac transmitter */ + wr32m(wx, WX_MAC_TX_CFG, + WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE); +} + +/** + * wx_configure_rx - Configure Receive Unit after Reset + * @wx: pointer to private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void wx_configure_rx(struct wx *wx) +{ + u32 psrtype, i; + int ret; + + wx_disable_rx(wx); + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + wr32(wx, WX_RDB_PL_CFG(0), psrtype); + + /* enable hw crc stripping */ + wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); + + if (wx->mac.type == wx_mac_sp) { + u32 psrctl; + + /* RSC Setup */ + psrctl = rd32(wx, WX_PSR_CTL); + psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + psrctl |= WX_PSR_CTL_RSC_DIS; + wr32(wx, WX_PSR_CTL, psrctl); + } + + /* set_rx_buffer_len must be called before ring initialization */ + wx_set_rx_buffer_len(wx); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < wx->num_rx_queues; i++) + wx_configure_rx_ring(wx, wx->rx_ring[i]); + + /* Enable all receives, disable security engine prior to block traffic */ + ret = wx_disable_sec_rx_path(wx); + if (ret < 0) + wx_err(wx, "The register status is abnormal, please check device."); + + wx_enable_rx(wx); + wx_enable_sec_rx_path(wx); +} + +static void wx_configure_isb(struct wx *wx) +{ + /* set ISB Address */ + wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32)); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma)); +} + +void wx_configure(struct wx *wx) +{ + wx_set_rxpba(wx); + wx_configure_port(wx); + + wx_set_rx_mode(wx->netdev); + + wx_enable_sec_rx_path(wx); + + wx_configure_tx(wx); + wx_configure_rx(wx); + wx_configure_isb(wx); +} +EXPORT_SYMBOL(wx_configure); + /** * wx_disable_pcie_master - Disable PCI-express master access - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Disables PCI-Express master access and verifies there are no pending * requests. **/ -int wx_disable_pcie_master(struct wx_hw *wxhw) +int wx_disable_pcie_master(struct wx *wx) { int status = 0; u32 val; /* Always set this bit to ensure any future transactions are blocked */ - pci_clear_master(wxhw->pdev); + pci_clear_master(wx->pdev); /* Exit if master requests are blocked */ - if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING))) + if (!(rd32(wx, WX_PX_TRANSACTION_PENDING))) return 0; /* Poll for master request bit to clear */ status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, - false, wxhw, WX_PX_TRANSACTION_PENDING); + false, wx, WX_PX_TRANSACTION_PENDING); if (status < 0) - wx_err(wxhw, "PCIe transaction pending bit did not clear.\n"); + wx_err(wx, "PCIe transaction pending bit did not clear.\n"); return status; } @@ -781,106 +1548,106 @@ EXPORT_SYMBOL(wx_disable_pcie_master); /** * wx_stop_adapter - Generic stop Tx/Rx units - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. 
**/ -int wx_stop_adapter(struct wx_hw *wxhw) +int wx_stop_adapter(struct wx *wx) { u16 i; /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ - wxhw->adapter_stopped = true; + wx->adapter_stopped = true; /* Disable the receive unit */ - wx_disable_rx(wxhw); + wx_disable_rx(wx); /* Set interrupt mask to stop interrupts from being generated */ - wx_intr_disable(wxhw, WX_INTR_ALL); + wx_intr_disable(wx, WX_INTR_ALL); /* Clear any pending interrupts, flush previous writes */ - wr32(wxhw, WX_PX_MISC_IC, 0xffffffff); - wr32(wxhw, WX_BME_CTL, 0x3); + wr32(wx, WX_PX_MISC_IC, 0xffffffff); + wr32(wx, WX_BME_CTL, 0x3); /* Disable the transmit unit. Each queue must be disabled. */ - for (i = 0; i < wxhw->mac.max_tx_queues; i++) { - wr32m(wxhw, WX_PX_TR_CFG(i), + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32m(wx, WX_PX_TR_CFG(i), WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, WX_PX_TR_CFG_SWFLSH); } /* Disable the receive unit by stopping each queue */ - for (i = 0; i < wxhw->mac.max_rx_queues; i++) { - wr32m(wxhw, WX_PX_RR_CFG(i), + for (i = 0; i < wx->mac.max_rx_queues; i++) { + wr32m(wx, WX_PX_RR_CFG(i), WX_PX_RR_CFG_RR_EN, 0); } /* flush all queues disables */ - WX_WRITE_FLUSH(wxhw); + WX_WRITE_FLUSH(wx); /* Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ - return wx_disable_pcie_master(wxhw); + return wx_disable_pcie_master(wx); } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx_hw *wxhw) +void wx_reset_misc(struct wx *wx) { int i; /* receive packets that size > 2048 */ - wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); /* clear counters on read */ - wr32m(wxhw, WX_MMC_CONTROL, + wr32m(wx, WX_MMC_CONTROL, WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); - wr32m(wxhw, WX_MAC_RX_FLOW_CTRL, + wr32m(wx, WX_MAC_RX_FLOW_CTRL, WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); - wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - wr32m(wxhw, WX_MIS_RST_ST, + wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ - wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0); + wr32(wx, WX_PSR_MNG_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0); } - wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0); + wr32(wx, WX_PSR_LAN_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0); } /* set pause frame dst mac addr */ - wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001); - wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180); + wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001); + wr32(wx, WX_RDB_PFCMACDAH, 0x0180); } EXPORT_SYMBOL(wx_reset_misc); /** * wx_get_pcie_msix_counts - Gets MSI-X vector count - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @msix_count: number of MSI interrupts that can be obtained * @max_msix_count: number of MSI interrupts that mac need * * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. 
**/ -int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count) +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; struct device *dev = &pdev->dev; int pos; @@ -904,31 +1671,39 @@ int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_co } EXPORT_SYMBOL(wx_get_pcie_msix_counts); -int wx_sw_init(struct wx_hw *wxhw) +int wx_sw_init(struct wx *wx) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; u32 ssid = 0; int err = 0; - wxhw->vendor_id = pdev->vendor; - wxhw->device_id = pdev->device; - wxhw->revision_id = pdev->revision; - wxhw->oem_svid = pdev->subsystem_vendor; - wxhw->oem_ssid = pdev->subsystem_device; - wxhw->bus.device = PCI_SLOT(pdev->devfn); - wxhw->bus.func = PCI_FUNC(pdev->devfn); - - if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) { - wxhw->subsystem_vendor_id = pdev->subsystem_vendor; - wxhw->subsystem_device_id = pdev->subsystem_device; + wx->vendor_id = pdev->vendor; + wx->device_id = pdev->device; + wx->revision_id = pdev->revision; + wx->oem_svid = pdev->subsystem_vendor; + wx->oem_ssid = pdev->subsystem_device; + wx->bus.device = PCI_SLOT(pdev->devfn); + wx->bus.func = PCI_FUNC(pdev->devfn); + + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { + wx->subsystem_vendor_id = pdev->subsystem_vendor; + wx->subsystem_device_id = pdev->subsystem_device; } else { - err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid); + err = wx_flash_read_dword(wx, 0xfffdc, &ssid); if (!err) - wxhw->subsystem_device_id = swab16((u16)ssid); + wx->subsystem_device_id = swab16((u16)ssid); return err; } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, + sizeof(struct wx_mac_addr), + GFP_KERNEL); + if (!wx->mac_table) { + wx_err(wx, "mac_table allocation failed\n"); + return -ENOMEM; + } + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index a0652f5e9939..44dfd6ea442a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,25 +4,31 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit); -void wx_control_hw(struct wx_hw *wxhw, bool drv); -int wx_mng_present(struct wx_hw *wxhw); -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +void wx_intr_enable(struct wx *wx, u64 qmask); +void wx_irq_disable(struct wx *wx); +int wx_check_flash_load(struct wx *wx, u32 check_bit); +void wx_control_hw(struct wx *wx, bool drv); +int wx_mng_present(struct wx *wx); +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data); -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data); -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); -int wx_reset_hostif(struct wx_hw *wxhw); -void wx_init_eeprom_params(struct wx_hw *wxhw); -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr); -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr); -int wx_clear_rar(struct wx_hw *wxhw, u32 index); -void wx_init_rx_addrs(struct wx_hw *wxhw); -void wx_disable_rx(struct wx_hw *wxhw); -int wx_disable_pcie_master(struct wx_hw *wxhw); -int wx_stop_adapter(struct wx_hw *wxhw); -void wx_reset_misc(struct wx_hw *wxhw); -int wx_get_pcie_msix_counts(struct wx_hw 
*wxhw, u16 *msix_count, u16 max_msix_count); -int wx_sw_init(struct wx_hw *wxhw); +int wx_reset_hostif(struct wx *wx); +void wx_init_eeprom_params(struct wx *wx); +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); +void wx_init_rx_addrs(struct wx *wx); +void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +void wx_flush_sw_mac_table(struct wx *wx); +int wx_set_mac(struct net_device *netdev, void *p); +void wx_disable_rx(struct wx *wx); +void wx_set_rx_mode(struct net_device *netdev); +void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); +void wx_configure(struct wx *wx); +int wx_disable_pcie_master(struct wx *wx); +int wx_stop_adapter(struct wx *wx); +void wx_reset_misc(struct wx *wx); +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); +int wx_sw_init(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c new file mode 100644 index 000000000000..eb89a274083e --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -0,0 +1,2004 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <net/page_pool.h> +#include <linux/iopoll.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_lib.h" +#include "wx_hw.h" + +/* wx_test_staterr - tests bits in Rx descriptor status and error fields */ +static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * wx_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void wx_reuse_rx_page(struct wx_ring *rx_ring, + struct wx_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct wx_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->page_dma = old_buff->page_dma; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void wx_dma_sync_frag(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer) +{ + struct sk_buff *skb = rx_buffer->skb; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + WX_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + + /* If the page was released, just unmap it. 
*/ + if (unlikely(WX_CB(skb)->page_released)) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); +} + +static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff **skb, + int *rx_buffer_pgcnt) +{ + struct wx_rx_buffer *rx_buffer; + unsigned int size; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + size = le16_to_cpu(rx_desc->wb.upper.length); + +#if (PAGE_SIZE < 8192) + *rx_buffer_pgcnt = page_count(rx_buffer->page); +#else + *rx_buffer_pgcnt = 0; +#endif + + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. + * Only unmap it when EOP is reached + */ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + wx_dma_sync_frag(rx_ring, rx_buffer); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void wx_put_rx_buffer(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + struct sk_buff *skb, + int rx_buffer_pgcnt) +{ + if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + wx_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; + else + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} + +static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, + struct wx_rx_buffer *rx_buffer, + union wx_rx_desc *rx_desc) +{ + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = WX_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + struct sk_buff *skb = rx_buffer->skb; + + if (!skb) { + void *page_addr = page_address(rx_buffer->page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); + if (unlikely(!skb)) + return NULL; + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + if (size <= WX_RXBUFFER_256) { + memcpy(__skb_put(skb, size), page_addr, + ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + + return skb; + } + + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) + WX_CB(skb)->dma = rx_buffer->dma; + + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); + goto out; + + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + } + +out: +#if (PAGE_SIZE < 8192) + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static bool wx_alloc_mapped_page(struct wx_ring 
*rx_ring, + struct wx_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = page_pool_dev_alloc_pages(rx_ring->page_pool); + WARN_ON(!page); + dma = page_pool_get_dma_addr(page); + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * wx_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union wx_rx_desc *rx_desc; + struct wx_rx_buffer *bi; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = WX_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!wx_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = WX_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +u16 wx_desc_unused(struct wx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +/** + * wx_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool wx_is_non_eop(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(WX_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) + return false; + + rx_ring->rx_buffer_info[ntc].skb = skb; + + return true; +} + +static void wx_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * wx_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool wx_cleanup_headers(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (!netdev || + unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + wx_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. 
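+ * The ring is refilled in batches of WX_RX_BUFFER_WRITE descriptors, and the loop stops once the packet budget is exhausted or a descriptor is found whose DD (descriptor done) bit is still clear.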
+ **/ +static int wx_clean_rx_irq(struct wx_q_vector *q_vector, + struct wx_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = wx_desc_unused(rx_ring); + + do { + struct wx_rx_buffer *rx_buffer; + union wx_rx_desc *rx_desc; + struct sk_buff *skb; + int rx_buffer_pgcnt; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= WX_RX_BUFFER_WRITE) { + wx_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt); + + /* retrieve a buffer from the ring */ + skb = wx_build_skb(rx_ring, rx_buffer, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_buffer->pagecnt_bias++; + break; + } + + wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (wx_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (wx_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + napi_gro_receive(&q_vector->napi, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +/** + * wx_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + **/ +static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, + struct wx_ring *tx_ring, int napi_budget) +{ + unsigned int budget = q_vector->wx->tx_work_limit; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i = tx_ring->next_to_clean; + struct wx_tx_buffer *tx_buffer; + union wx_tx_desc *tx_desc; + + if (!netif_carrier_ok(tx_ring->netdev)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = WX_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union wx_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header 
data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + netdev_tx_completed_queue(wx_txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + netif_running(tx_ring->netdev)) + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + } + + return !!budget; +} + +/** + * wx_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
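+ * Tx rings are cleaned up to tx_work_limit descriptors each; the Rx budget is split evenly across the Rx rings attached to this vector, with a floor of one packet per ring so a ring is never polled with zero budget.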
+ **/ +static int wx_poll(struct napi_struct *napi, int budget) +{ + struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi); + int per_ring_budget, work_done = 0; + struct wx *wx = q_vector->wx; + bool clean_complete = true; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) { + if (!wx_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + wx_for_each_ring(ring, q_vector->rx) { + int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget); + + work_done += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + if (likely(napi_complete_done(napi, work_done))) { + if (netif_running(wx->netdev)) + wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx)); + } + + return min(work_done, budget - 1); +} + +static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) +{ + if (likely(wx_desc_unused(tx_ring) >= size)) + return 0; + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* For the next check */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(wx_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static void wx_tx_map(struct wx_ring *tx_ring, + struct wx_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct wx_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + union wx_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; + tx_desc = WX_TX_DESC(tx_ring, i); + + tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT); + + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > WX_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += WX_MAX_DATA_PER_TXD; + size -= WX_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = WX_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ 
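+ /* EOP marks the final descriptor of the frame; RS asks hardware to write WX_TXD_STAT_DD back into this descriptor so wx_clean_tx_irq() can see the packet complete. */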
+ cmd_type |= size | WX_TXD_EOP | WX_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + wx_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; +} + +static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, + struct wx_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct wx_tx_buffer *first; + unsigned short f; + + /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (wx_maybe_stop_tx(tx_ring, count + 3)) + return NETDEV_TX_BUSY; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + wx_tx_map(tx_ring, first); + + return NETDEV_TX_OK; +} + +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + unsigned int r_idx = skb->queue_mapping; + struct wx *wx = netdev_priv(netdev); + struct wx_ring *tx_ring; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
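+ * Note that skb_put_padto() frees the skb on failure, so returning NETDEV_TX_OK below without an explicit kfree is safe.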
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= wx->num_tx_queues) + r_idx = r_idx % wx->num_tx_queues; + tx_ring = wx->tx_ring[r_idx]; + + return wx_xmit_frame_ring(skb, tx_ring); +} +EXPORT_SYMBOL(wx_xmit_frame); + +void wx_napi_enable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_enable_all); + +void wx_napi_disable_all(struct wx *wx) +{ + struct wx_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) { + q_vector = wx->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} +EXPORT_SYMBOL(wx_napi_disable_all); + +/** + * wx_set_rss_queues: Allocate queues for RSS + * @wx: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static void wx_set_rss_queues(struct wx *wx) +{ + wx->num_rx_queues = wx->mac.max_rx_queues; + wx->num_tx_queues = wx->mac.max_tx_queues; +} + +static void wx_set_num_queues(struct wx *wx) +{ + /* Start with base case */ + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->queues_per_pool = 1; + + wx_set_rss_queues(wx); +} + +/** + * wx_acquire_msix_vectors - acquire MSI-X vectors + * @wx: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int wx_acquire_msix_vectors(struct wx *wx) +{ + struct irq_affinity affd = {0, }; + int nvecs, i; + + nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + + wx->msix_entries = kcalloc(nvecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entries) + return -ENOMEM; + + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, + nvecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &affd); + if (nvecs < 0) { + wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs); + kfree(wx->msix_entries); + wx->msix_entries = NULL; + return nvecs; + } + + for (i = 0; i < nvecs; i++) { + wx->msix_entries[i].entry = i; + wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + } + + /* one for msix_other */ + nvecs -= 1; + wx->num_q_vectors = nvecs; + wx->num_rx_queues = nvecs; + wx->num_tx_queues = nvecs; + + return 0; +} + +/** + * wx_set_interrupt_capability - set MSI-X or MSI if supported + * @wx: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int wx_set_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int nvecs, ret; + + /* We will try to get MSI-X interrupts first */ + ret = wx_acquire_msix_vectors(wx); + if (ret == 0 || (ret == -ENOMEM)) + return ret; + + wx->num_rx_queues = 1; + wx->num_tx_queues = 1; + wx->num_q_vectors = 1; + + /* minimum one for queue, one for misc */ + nvecs = 1; + nvecs = pci_alloc_irq_vectors(pdev, nvecs, + nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (nvecs == 1) { + if (pdev->msi_enabled) + wx_err(wx, "Fallback to MSI.\n"); + else + wx_err(wx, "Fallback to LEGACY.\n"); + } else { + wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. 
Error: %d\n", nvecs); + return nvecs; + } + + pdev->irq = pci_irq_vector(pdev, 0); + + return 0; +} + +/** + * wx_cache_ring_rss - Descriptor ring to register mapping for RSS + * @wx: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static void wx_cache_ring_rss(struct wx *wx) +{ + u16 i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->reg_idx = i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->reg_idx = i; +} + +static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * wx_alloc_q_vector - Allocate memory for a single interrupt vector + * @wx: board private structure to initialize + * @v_count: q_vectors allocated on wx, used for ring interleaving + * @v_idx: index of vector in wx struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int wx_alloc_q_vector(struct wx *wx, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct wx_q_vector *q_vector; + int ring_count, default_itr; + struct wx_ring *ring; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count; + + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(wx->netdev, &q_vector->napi, + wx_poll); + + /* tie q_vector and wx together */ + wx->q_vector[v_idx] = q_vector; + q_vector->wx = wx; + q_vector->v_idx = v_idx; + if (cpu_online(v_idx)) + q_vector->numa_node = cpu_to_node(v_idx); + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (wx->mac.type == wx_mac_sp) + default_itr = WX_12K_ITR; + else + default_itr = WX_7K_ITR; + /* initialize ITR */ + if (txr_count && !rxr_count) + /* tx only vector */ + q_vector->itr = wx->tx_itr_setting ? + default_itr : wx->tx_itr_setting; + else + /* rx or rx/tx vector */ + q_vector->itr = wx->rx_itr_setting ? 
+ default_itr : wx->rx_itr_setting; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + wx_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = wx->tx_ring_count; + + ring->queue_index = txr_idx; + + /* assign ring to wx */ + wx->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &wx->pdev->dev; + ring->netdev = wx->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + wx_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = wx->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to wx */ + wx->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * wx_free_q_vector - Free memory allocated for specific interrupt vector + * @wx: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void wx_free_q_vector(struct wx *wx, int v_idx) +{ + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->tx) + wx->tx_ring[ring->queue_index] = NULL; + + wx_for_each_ring(ring, q_vector->rx) + wx->rx_ring[ring->queue_index] = NULL; + + wx->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * wx_alloc_q_vectors - Allocate memory for interrupt vectors + * @wx: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int wx_alloc_q_vectors(struct wx *wx) +{ + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + unsigned int rxr_remaining = wx->num_rx_queues; + unsigned int txr_remaining = wx->num_tx_queues; + unsigned int q_vectors = wx->num_q_vectors; + int rqpv, tqpv; + int err; + + for (; v_idx < q_vectors; v_idx++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = wx_alloc_q_vector(wx, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); + + return -ENOMEM; +} + +/** + * wx_free_q_vectors - Free memory allocated for interrupt vectors + * @wx: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
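+ * The q_vector itself is released with kfree_rcu(), so readers that walk the ring pointers under rcu_read_lock(), such as wx_get_stats64(), cannot race with the free.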
+ **/ +static void wx_free_q_vectors(struct wx *wx) +{ + int v_idx = wx->num_q_vectors; + + wx->num_tx_queues = 0; + wx->num_rx_queues = 0; + wx->num_q_vectors = 0; + + while (v_idx--) + wx_free_q_vector(wx, v_idx); +} + +void wx_reset_interrupt_capability(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + if (!pdev->msi_enabled && !pdev->msix_enabled) + return; + + pci_free_irq_vectors(wx->pdev); + if (pdev->msix_enabled) { + kfree(wx->msix_entries); + wx->msix_entries = NULL; + } +} +EXPORT_SYMBOL(wx_reset_interrupt_capability); + +/** + * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @wx: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void wx_clear_interrupt_scheme(struct wx *wx) +{ + wx_free_q_vectors(wx); + wx_reset_interrupt_capability(wx); +} +EXPORT_SYMBOL(wx_clear_interrupt_scheme); + +int wx_init_interrupt_scheme(struct wx *wx) +{ + int ret; + + /* Number of supported queues */ + wx_set_num_queues(wx); + + /* Set interrupt mode */ + ret = wx_set_interrupt_capability(wx); + if (ret) { + wx_err(wx, "Allocate irq vectors for failed.\n"); + return ret; + } + + /* Allocate memory for queues */ + ret = wx_alloc_q_vectors(wx); + if (ret) { + wx_err(wx, "Unable to allocate memory for queue vectors.\n"); + wx_reset_interrupt_capability(wx); + return ret; + } + + wx_cache_ring_rss(wx); + + return 0; +} +EXPORT_SYMBOL(wx_init_interrupt_scheme); + +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(wx_msix_clean_rings); + +void wx_free_irq(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + int vector; + + if (!(pdev->msix_enabled)) { + free_irq(pdev->irq, wx); + return; + } + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + free_irq(entry->vector, q_vector); + } + + free_irq(wx->msix_entries[vector].vector, wx); +} +EXPORT_SYMBOL(wx_free_irq); + +/** + * wx_setup_isb_resources - allocate interrupt status resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +int wx_setup_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + wx->isb_mem = dma_alloc_coherent(&pdev->dev, + sizeof(u32) * 4, + &wx->isb_dma, + GFP_KERNEL); + if (!wx->isb_mem) { + wx_err(wx, "Alloc isb_mem failed\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(wx_setup_isb_resources); + +/** + * wx_free_isb_resources - allocate all queues Rx resources + * @wx: board private structure + * + * Return 0 on success, negative on failure + **/ +void wx_free_isb_resources(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + + dma_free_coherent(&pdev->dev, sizeof(u32) * 4, + wx->isb_mem, wx->isb_dma); + wx->isb_mem = NULL; +} +EXPORT_SYMBOL(wx_free_isb_resources); + +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) +{ + u32 cur_tag = 0; + + cur_tag = wx->isb_mem[WX_ISB_HEADER]; + wx->isb_tag[idx] = cur_tag; + + return (__force u32)cpu_to_le32(wx->isb_mem[idx]); +} +EXPORT_SYMBOL(wx_misc_isb); + +/** + * wx_set_ivar - set 
the IVAR registers, mapping interrupt causes to vectors + * @wx: pointer to wx struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void wx_set_ivar(struct wx *wx, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + + if (direction == -1) { + /* other causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(wx, WX_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= WX_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(wx, WX_PX_IVAR(queue >> 1), ivar); + } +} + +/** + * wx_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. + */ +static void wx_write_eitr(struct wx_q_vector *q_vector) +{ + struct wx *wx = q_vector->wx; + int v_idx = q_vector->v_idx; + u32 itr_reg; + + if (wx->mac.type == wx_mac_sp) + itr_reg = q_vector->itr & WX_SP_MAX_EITR; + else + itr_reg = q_vector->itr & WX_EM_MAX_EITR; + + itr_reg |= WX_PX_ITR_CNT_WDIS; + + wr32(wx, WX_PX_ITR(v_idx), itr_reg); +} + +/** + * wx_configure_vectors - Configure vectors for hardware + * @wx: board private structure + * + * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY + * interrupts. + **/ +void wx_configure_vectors(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + u32 eitrsel = 0; + u16 v_idx; + + if (pdev->msix_enabled) { + /* Populate MSIX to EITR Select */ + wr32(wx, WX_PX_ITRSEL, eitrsel); + /* use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL); + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts. + */ + wr32(wx, WX_PX_GPIE, 0); + } + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) { + struct wx_q_vector *q_vector = wx->q_vector[v_idx]; + struct wx_ring *ring; + + wx_for_each_ring(ring, q_vector->rx) + wx_set_ivar(wx, 0, ring->reg_idx, v_idx); + + wx_for_each_ring(ring, q_vector->tx) + wx_set_ivar(wx, 1, ring->reg_idx, v_idx); + + wx_write_eitr(q_vector); + } + + wx_set_ivar(wx, -1, 0, v_idx); + if (pdev->msix_enabled) + wr32(wx, WX_PX_ITR(v_idx), 1950); +} +EXPORT_SYMBOL(wx_configure_vectors); + +/** + * wx_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void wx_clean_rx_ring(struct wx_ring *rx_ring) +{ + struct wx_rx_buffer *rx_buffer; + u16 i = rx_ring->next_to_clean; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + if (WX_CB(skb)->page_released) + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + + dev_kfree_skb(skb); + } + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
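+ * The sync is done while the page_pool DMA mapping is still live; the mapping itself is managed by the page_pool and persists until the page finally leaves the pool.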
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + WX_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * wx_clean_all_rx_rings - Free Rx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_rx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_clean_rx_ring(wx->rx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_rx_rings); + +/** + * wx_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void wx_free_rx_resources(struct wx_ring *rx_ring) +{ + wx_clean_rx_ring(rx_ring); + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; + + if (rx_ring->page_pool) { + page_pool_destroy(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } +} + +/** + * wx_free_all_rx_resources - Free Rx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all receive software resources + **/ +static void wx_free_all_rx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_rx_queues; i++) + wx_free_rx_resources(wx->rx_ring[i]); +} + +/** + * wx_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void wx_clean_tx_ring(struct wx_ring *tx_ring) +{ + struct wx_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_clean; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union wx_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = WX_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = WX_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(wx_txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * wx_clean_all_tx_rings - Free Tx Buffers for all queues + * @wx: board private structure + **/ +void wx_clean_all_tx_rings(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_clean_tx_ring(wx->tx_ring[i]); +} +EXPORT_SYMBOL(wx_clean_all_tx_rings); + +/** + * wx_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor 
ring for a specific queue + * + * Free all transmit software resources + **/ +static void wx_free_tx_resources(struct wx_ring *tx_ring) +{ + wx_clean_tx_ring(tx_ring); + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * wx_free_all_tx_resources - Free Tx Resources for All Queues + * @wx: pointer to hardware structure + * + * Free all transmit software resources + **/ +static void wx_free_all_tx_resources(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_tx_queues; i++) + wx_free_tx_resources(wx->tx_ring[i]); +} + +void wx_free_resources(struct wx *wx) +{ + wx_free_isb_resources(wx); + wx_free_all_rx_resources(wx); + wx_free_all_tx_resources(wx); +} +EXPORT_SYMBOL(wx_free_resources); + +static int wx_alloc_page_pool(struct wx_ring *rx_ring) +{ + int ret = 0; + + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = rx_ring->size, + .nid = dev_to_node(rx_ring->dev), + .dev = rx_ring->dev, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE, + }; + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + ret = PTR_ERR(rx_ring->page_pool); + rx_ring->page_pool = NULL; + } + + return ret; +} + +/** + * wx_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int wx_setup_rx_resources(struct wx_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size, ret; + + size = sizeof(struct wx_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + set_dev_node(dev, orig_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + } + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + ret = wx_alloc_page_pool(rx_ring); + if (ret < 0) { + dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); + goto err; + } + + return 0; +err: + kvfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_rx_resources - allocate all queues Rx resources + * @wx: pointer to hardware structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
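+ * Here that duty is discharged locally: the err_setup_rx unwind below rewinds the index and frees every ring that was allocated before the failure.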
+ * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_rx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_rx_queues; i++) { + err = wx_setup_rx_resources(wx->rx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_rx_resources(wx->rx_ring[i]); + return err; +} + +/** + * wx_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int wx_setup_tx_resources(struct wx_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct wx_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + set_dev_node(dev, orig_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + } + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + kvfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * wx_setup_all_tx_resources - allocate all queues Tx resources + * @wx: pointer to private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
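+ * As in the Rx path, the err_setup_tx unwind below frees whatever had already been allocated.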
+ * + * Return 0 on success, negative on failure + **/ +static int wx_setup_all_tx_resources(struct wx *wx) +{ + int i, err = 0; + + for (i = 0; i < wx->num_tx_queues; i++) { + err = wx_setup_tx_resources(wx->tx_ring[i]); + if (!err) + continue; + + wx_err(wx, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + wx_free_tx_resources(wx->tx_ring[i]); + return err; +} + +int wx_setup_resources(struct wx *wx) +{ + int err; + + /* allocate transmit descriptors */ + err = wx_setup_all_tx_resources(wx); + if (err) + return err; + + /* allocate receive descriptors */ + err = wx_setup_all_rx_resources(wx); + if (err) + goto err_free_tx; + + err = wx_setup_isb_resources(wx); + if (err) + goto err_free_rx; + + return 0; + +err_free_rx: + wx_free_all_rx_resources(wx); +err_free_tx: + wx_free_all_tx_resources(wx); + + return err; +} +EXPORT_SYMBOL(wx_setup_resources); + +/** + * wx_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + */ +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct wx *wx = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL(wx_get_stats64); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h new file mode 100644 index 000000000000..50ee41f1fa10 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
+ */ + +#ifndef _WX_LIB_H_ +#define _WX_LIB_H_ + +void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); +u16 wx_desc_unused(struct wx_ring *ring); +netdev_tx_t wx_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +void wx_napi_enable_all(struct wx *wx); +void wx_napi_disable_all(struct wx *wx); +void wx_reset_interrupt_capability(struct wx *wx); +void wx_clear_interrupt_scheme(struct wx *wx); +int wx_init_interrupt_scheme(struct wx *wx); +irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data); +void wx_free_irq(struct wx *wx); +int wx_setup_isb_resources(struct wx *wx); +void wx_free_isb_resources(struct wx *wx); +u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_configure_vectors(struct wx *wx); +void wx_clean_all_rx_rings(struct wx *wx); +void wx_clean_all_tx_rings(struct wx *wx); +void wx_free_resources(struct wx *wx); +int wx_setup_resources(struct wx *wx); +void wx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); + +#endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1cbeef8230bf..77d8d7f1707e 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -4,6 +4,9 @@ #ifndef _WX_TYPE_H_ #define _WX_TYPE_H_ +#include <linux/bitfield.h> +#include <linux/netdevice.h> + /* Vendor ID */ #ifndef PCI_VENDOR_ID_WANGXUN #define PCI_VENDOR_ID_WANGXUN 0x8088 @@ -36,12 +39,11 @@ #define WX_SPI_CMD 0x10104 #define WX_SPI_CMD_READ_DWORD 0x1 #define WX_SPI_CLK_DIV 0x3 -#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) -#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) -#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v) +#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v) +#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v) #define WX_SPI_DATA 0x10108 #define WX_SPI_DATA_BYPASS BIT(31) -#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) #define WX_SPI_DATA_OP_DONE BIT(0) #define WX_SPI_STATUS 0x1010C #define WX_SPI_STATUS_OPDONE BIT(0) @@ -64,21 +66,50 @@ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) +#define WX_CFG_PORT_CTL_QINQ BIT(2) +#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ +#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + +/* GPIO Registers */ +#define WX_GPIO_DR 0x14800 +#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */ +#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */ +#define WX_GPIO_DDR 0x14804 +#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ +#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +#define WX_GPIO_CTL 0x14808 +#define WX_GPIO_INTEN 0x14830 +#define WX_GPIO_INTEN_0 BIT(0) +#define WX_GPIO_INTEN_1 BIT(1) +#define WX_GPIO_INTMASK 0x14834 +#define WX_GPIO_INTTYPE_LEVEL 0x14838 +#define WX_GPIO_POLARITY 0x1483C +#define WX_GPIO_EOI 0x1484C /*********************** Transmit DMA registers **************************/ /* transmit global control */ #define WX_TDM_CTL 0x18000 /* TDM CTL BIT */ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ +#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) /***************************** RDB registers *********************************/ /* receive packet buffer */ #define WX_RDB_PB_CTL 0x19000 #define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */ #define WX_RDB_PB_CTL_DISABLED BIT(0) +#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define WX_RDB_PB_SZ_SHIFT 10 /* statistic */ #define 
WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +/* ring assignment */ +#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define WX_RDB_PL_CFG_L4HDR BIT(1) +#define WX_RDB_PL_CFG_L3HDR BIT(2) +#define WX_RDB_PL_CFG_L2HDR BIT(3) +#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) +#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) /******************************* PSR Registers *******************************/ /* psr control */ @@ -96,10 +127,24 @@ #define WX_PSR_CTL_MO_SHIFT 5 #define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT) #define WX_PSR_CTL_TPE BIT(4) +#define WX_PSR_MAX_SZ 0x15020 +#define WX_PSR_VLAN_CTL 0x15088 +#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ +#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ /* mcasst/ucast overflow tbl */ #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) +/* VM L2 contorl */ +#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ +#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ +#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ +#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ +#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ +#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */ +#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */ + /* Management */ #define WX_PSR_MNG_FLEX_SEL 0x1582C #define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) @@ -113,14 +158,35 @@ /* mac switcher */ #define WX_PSR_MAC_SWC_AD_L 0x16200 #define WX_PSR_MAC_SWC_AD_H 0x16204 -#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) -#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v) +#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v) #define WX_PSR_MAC_SWC_AD_H_AV BIT(31) #define WX_PSR_MAC_SWC_VM_L 0x16208 #define WX_PSR_MAC_SWC_VM_H 0x1620C #define WX_PSR_MAC_SWC_IDX 0x16210 #define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU +/********************************* RSEC **************************************/ +/* general rsec */ +#define WX_RSC_CTL 0x17000 +#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6) +#define WX_RSC_CTL_CRC_STRIP BIT(2) +#define WX_RSC_CTL_RX_DIS BIT(1) +#define WX_RSC_ST 0x17004 +#define WX_RSC_ST_RSEC_RDY BIT(0) + +/****************************** TDB ******************************************/ +#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) +#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define WX_TSC_CTL 0x1D000 +#define WX_TSC_CTL_TX_DIS BIT(1) +#define WX_TSC_CTL_TSEC_DIS BIT(0) +#define WX_TSC_BUF_AE 0x1D00C +#define WX_TSC_BUF_AE_THR GENMASK(9, 0) + /************************************** MNG ********************************/ #define WX_MNG_SWFW_SYNC 0x1E008 #define WX_MNG_SWFW_SYNC_SW_MB BIT(2) @@ -133,11 +199,15 @@ /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 #define WX_MAC_TX_CFG_TE BIT(0) +#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29) +#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0) +#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3) #define WX_MAC_RX_CFG 0x11004 #define WX_MAC_RX_CFG_RE BIT(0) #define WX_MAC_RX_CFG_JE BIT(8) #define WX_MAC_PKT_FLT 0x11008 #define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ +#define WX_MAC_WDG_TIMEOUT 0x1100C #define 
WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ #define WX_MMC_CONTROL 0x11800 @@ -147,10 +217,34 @@ /* Interrupt Registers */ #define WX_BME_CTL 0x12020 #define WX_PX_MISC_IC 0x100 +#define WX_PX_MISC_ICS 0x104 +#define WX_PX_MISC_IEN 0x108 +#define WX_PX_INTA 0x110 +#define WX_PX_GPIE 0x118 +#define WX_PX_GPIE_MODEL BIT(0) +#define WX_PX_IC 0x120 #define WX_PX_IMS(_i) (0x140 + (_i) * 4) +#define WX_PX_IMC(_i) (0x150 + (_i) * 4) +#define WX_PX_ISB_ADDR_L 0x160 +#define WX_PX_ISB_ADDR_H 0x164 #define WX_PX_TRANSACTION_PENDING 0x168 +#define WX_PX_ITRSEL 0x180 +#define WX_PX_ITR(_i) (0x200 + (_i) * 4) +#define WX_PX_ITR_CNT_WDIS BIT(31) +#define WX_PX_MISC_IVAR 0x4FC +#define WX_PX_IVAR(_i) (0x500 + (_i) * 4) + +#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define WX_7K_ITR 595 +#define WX_12K_ITR 336 +#define WX_SP_MAX_EITR 0x00000FF8U +#define WX_EM_MAX_EITR 0x00007FFCU /* transmit DMA Registers */ +#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) #define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) /* Transmit Config masks */ #define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ @@ -160,8 +254,22 @@ #define WX_PX_TR_CFG_THRE_SHIFT 8 /* Receive DMA Registers */ +#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ +#define WX_PX_RR_CFG_SPLIT_MODE BIT(26) +#define WX_PX_RR_CFG_RR_THER_SHIFT 16 +#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) +#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8) +#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1 #define WX_PX_RR_CFG_RR_EN BIT(0) /* Number of 80 microseconds we wait for PCI Express master disable */ @@ -185,6 +293,50 @@ #define WX_SW_REGION_PTR 0x1C +#define WX_MAC_STATE_DEFAULT 0x1 +#define WX_MAC_STATE_MODIFIED 0x2 +#define WX_MAC_STATE_IN_USE 0x4 + +#define WX_MAX_RXD 8192 +#define WX_MAX_TXD 8192 + +/* Supported Rx Buffer Sizes */ +#define WX_RXBUFFER_256 256 /* Used for skb receive header */ +#define WX_RXBUFFER_2K 2048 +#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#if MAX_SKB_FRAGS < 8 +#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024) +#else +#define WX_RX_BUFSZ WX_RXBUFFER_2K +#endif + +#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define WX_MAX_DATA_PER_TXD BIT(14) +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* Ether Types */ +#define WX_ETH_P_CNM 0x22E7 + +#define WX_CFG_PORT_ST 0x14404 + +/******************* Receive Descriptor bit definitions **********************/ +#define WX_RXD_STAT_DD BIT(0) /* Done */ +#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ + +#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ + +/*********************** Transmit Descriptor Config Masks ****************/ +#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ +#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ +#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */ +#define 
WX_TXD_EOP BIT(24) /* End of Packet */ +#define WX_TXD_IFCS BIT(25) /* Insert FCS */ +#define WX_TXD_RS BIT(27) /* Report Status */ + /* Host Interface Command Structures */ struct wx_hic_hdr { u8 cmd; @@ -249,14 +401,23 @@ enum wx_mac_type { wx_mac_em }; +enum em_mac_type { + em_mac_type_unknown = 0, + em_mac_type_mdi, + em_mac_type_rgmii +}; + struct wx_mac_info { enum wx_mac_type type; bool set_lben; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; + u32 mta_shadow[128]; s32 mc_filter_type; u32 mcft_size; u32 num_rar_entries; + u32 rx_pb_size; + u32 tx_pb_size; u32 max_tx_queues; u32 max_rx_queues; @@ -284,19 +445,183 @@ struct wx_addr_filter_info { bool user_set_promisc; }; +struct wx_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + enum wx_reset_type { WX_LAN_RESET = 0, WX_SW_RESET, WX_GLOBAL_RESET }; -struct wx_hw { +struct wx_cb { + dma_addr_t dma; + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; + +#define WX_CB(skb) ((struct wx_cb *)(skb)->cb) + +/* Transmit Descriptor */ +union wx_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union wx_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define WX_RX_DESC(R, i) \ + (&(((union wx_rx_desc *)((R)->desc))[i])) +#define WX_TX_DESC(R, i) \ + (&(((union wx_tx_desc *)((R)->desc))[i])) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct wx_tx_buffer { + union wx_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct wx_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; +}; + +struct wx_queue_stats { + u64 packets; + u64 bytes; +}; + +/* iterator for handling rings in ring container */ +#define wx_for_each_ring(posm, headm) \ + for (posm = (headm).ring; posm; posm = posm->next) + +struct wx_ring_container { + struct wx_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct wx_ring { + struct wx_ring *next; /* pointer to next ring in q_vector */ + struct wx_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct page_pool *page_pool; + void *desc; /* descriptor ring memory */ + union { + struct wx_tx_buffer *tx_buffer_info; + struct wx_rx_buffer *rx_buffer_info; + }; + u8 __iomem *tail; + 
dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct wx_queue_stats stats; + struct u64_stats_sync syncp; +} ____cacheline_internodealigned_in_smp; + +struct wx_q_vector { + struct wx *wx; + int cpu; /* CPU for DCA */ + int numa_node; + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring + */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct wx_ring_container rx, tx; + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + + char name[IFNAMSIZ + 17]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct wx_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum wx_isb_idx { + WX_ISB_HEADER, + WX_ISB_MISC, + WX_ISB_VEC0, + WX_ISB_VEC1, + WX_ISB_MAX +}; + +struct wx { u8 __iomem *hw_addr; struct pci_dev *pdev; + struct net_device *netdev; struct wx_bus_info bus; struct wx_mac_info mac; + enum em_mac_type mac_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -304,11 +629,63 @@ struct wx_hw { u8 revision_id; u16 oem_ssid; u16 oem_svid; + u16 msg_enable; bool adapter_stopped; + u16 tpid[8]; + char eeprom_id[32]; + char *driver_name; enum wx_reset_type reset_type; + + /* PHY stuff */ + unsigned int link; + int speed; + int duplex; + struct phy_device *phydev; + + bool wol_enabled; + bool ncsi_enabled; + bool gpio_ctrl; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + + u32 tx_ring_count; + u32 rx_ring_count; + + struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; + struct wx_ring *rx_ring[64]; + struct wx_q_vector *q_vector[64]; + + unsigned int queues_per_pool; + struct msix_entry *msix_entries; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[WX_ISB_MAX]; + +#define WX_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; + +#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 *rss_key; + u32 wol; + + u16 bd_number; }; #define WX_INTR_ALL (~0ULL) +#define WX_INTR_Q(i) BIT(i) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) @@ -319,23 +696,23 @@ struct wx_hw { wr32((a), (reg) + ((off) << 2), (val)) static inline u32 -rd32m(struct wx_hw *wxhw, u32 reg, u32 mask) +rd32m(struct wx *wx, u32 reg, u32 mask) { u32 val; - val = rd32(wxhw, reg); + val = rd32(wx, reg); return val & mask; } static inline void -wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field) +wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) { u32 val; - val = rd32(wxhw, reg); + val = rd32(wx, reg); val = ((val & ~mask) | (field & mask)); - wr32(wxhw, reg, val); + wr32(wx, reg, val); } /* On some domestic CPU platforms, sometimes IO is not synchronized with @@ -343,10 +720,10 
@@ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field) */ #define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR) -#define wx_err(wxhw, fmt, arg...) \ - dev_err(&(wxhw)->pdev->dev, fmt, ##arg) +#define wx_err(wx, fmt, arg...) \ + dev_err(&(wx)->pdev->dev, fmt, ##arg) -#define wx_dbg(wxhw, fmt, arg...) \ - dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg) +#define wx_dbg(wx, fmt, arg...) \ + dev_dbg(&(wx)->pdev->dev, fmt, ##arg) #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile index 391c2cbc1bb4..61a13d98abe7 100644 --- a/drivers/net/ethernet/wangxun/ngbe/Makefile +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_NGBE) += ngbe.o -ngbe-objs := ngbe_main.o ngbe_hw.o +ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o ngbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h deleted file mode 100644 index af147ca8605c..000000000000 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#ifndef _NGBE_H_ -#define _NGBE_H_ - -#include "ngbe_type.h" - -#define NGBE_MAX_FDIR_INDICES 7 - -#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) -#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) - -#define NGBE_ETH_LENGTH_OF_ADDRESS 6 -#define NGBE_MAX_MSIX_VECTORS 0x09 -#define NGBE_RAR_ENTRIES 32 - -/* TX/RX descriptor defines */ -#define NGBE_DEFAULT_TXD 512 /* default ring size */ -#define NGBE_DEFAULT_TX_WORK 256 -#define NGBE_MAX_TXD 8192 -#define NGBE_MIN_TXD 128 - -#define NGBE_DEFAULT_RXD 512 /* default ring size */ -#define NGBE_DEFAULT_RX_WORK 256 -#define NGBE_MAX_RXD 8192 -#define NGBE_MIN_RXD 128 - -#define NGBE_MAC_STATE_DEFAULT 0x1 -#define NGBE_MAC_STATE_MODIFIED 0x2 -#define NGBE_MAC_STATE_IN_USE 0x4 - -struct ngbe_mac_addr { - u8 addr[ETH_ALEN]; - u16 state; /* bitmask */ - u64 pools; -}; - -/* board specific private data structure */ -struct ngbe_adapter { - u8 __iomem *io_addr; /* Mainly for iounmap use */ - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in ngbe_hw.h */ - struct ngbe_hw hw; - struct ngbe_mac_addr *mac_table; - u16 msg_enable; - - /* Tx fast path data */ - int num_tx_queues; - u16 tx_itr_setting; - u16 tx_work_limit; - - /* Rx fast path data */ - int num_rx_queues; - u16 rx_itr_setting; - u16 rx_work_limit; - - int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* upper limit of q_vectors for device */ - - u32 tx_ring_count; - u32 rx_ring_count; - -#define NGBE_MAX_RETA_ENTRIES 128 - u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; - -#define NGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 *rss_key; - u32 wol; - - u16 bd_number; -}; - -extern char ngbe_driver_name[]; - -#endif /* _NGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c new file mode 100644 index 000000000000..5b25834baf38 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/pci.h> +#include <linux/phy.h> +#include <linux/netdevice.h> + +#include "../libwx/wx_ethtool.h" +#include "ngbe_ethtool.h" + +static const struct ethtool_ops ngbe_ethtool_ops = { + .get_drvinfo = wx_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, +}; + +void ngbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ngbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h new file mode 100644 index 000000000000..487074e0eeec --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _NGBE_ETHTOOL_H_ +#define _NGBE_ETHTOOL_H_ + +void ngbe_set_ethtool_ops(struct net_device *netdev); + +#endif /* _NGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 0e3923b3737e..6562a2de9527 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -9,12 +9,10 @@ #include "../libwx/wx_hw.h" #include "ngbe_type.h" #include "ngbe_hw.h" -#include "ngbe.h" -int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw) +int ngbe_eeprom_chksum_hostif(struct wx *wx) { struct wx_hic_read_shadow_ram buffer; - struct wx_hw *wxhw = &hw->wxhw; int status; int tmp; @@ -27,61 +25,73 @@ int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw) /* one word */ buffer.length = 0; - status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer), + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status < 0) return status; - tmp = rd32a(wxhw, WX_MNG_MBOX, 1); + tmp = rd32a(wx, WX_MNG_MBOX, 1); if (tmp == NGBE_FW_CMD_ST_PASS) return 0; return -EIO; } -static int ngbe_reset_misc(struct ngbe_hw *hw) +static int ngbe_reset_misc(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - - wx_reset_misc(wxhw); - if (hw->mac_type == ngbe_mac_type_rgmii) - wr32(wxhw, NGBE_MDIO_CLAUSE_SELECT, 0xF); - if (hw->gpio_ctrl) { + wx_reset_misc(wx); + if (wx->gpio_ctrl) { /* gpio0 is used to power on/off control*/ - wr32(wxhw, NGBE_GPIO_DDR, 0x1); - wr32(wxhw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + wr32(wx, NGBE_GPIO_DDR, 0x1); + ngbe_sfp_modules_txrx_powerctl(wx, false); } return 0; } +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi) +{ + /* gpio0 is used to power on control . 0 is on */ + wr32(wx, NGBE_GPIO_DR, swi ? 0 : NGBE_GPIO_DR_0); +} + /** * ngbe_reset_hw - Perform hardware reset - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. 
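 *
 * Usage sketch (this mirrors the probe path later in this patch; the
 * error handling shown is illustrative, not the only valid form):
 *
 *	err = ngbe_reset_hw(wx);
 *	if (err) {
 *		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
 *		return err;
 *	}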
**/ -int ngbe_reset_hw(struct ngbe_hw *hw) +int ngbe_reset_hw(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - int status = 0; - u32 reset = 0; + u32 val = 0; + int ret = 0; - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = wx_stop_adapter(wxhw); - if (status != 0) - return status; - reset = WX_MIS_RST_LAN_RST(wxhw->bus.func); - wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST)); - ngbe_reset_misc(hw); + /* Call wx stop to disable tx/rx and clear interrupts */ + ret = wx_stop_adapter(wx); + if (ret != 0) + return ret; + + if (wx->mac_type != em_mac_type_mdi) { + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + + ret = read_poll_timeout(rd32, val, + !(val & (BIT(9) << wx->bus.func)), 1000, + 100000, false, wx, 0x10028); + if (ret) { + wx_err(wx, "Lan reset exceeded maximum times.\n"); + return ret; + } + } + ngbe_reset_misc(wx); /* Store the permanent mac address */ - wx_get_mac_addr(wxhw, wxhw->mac.perm_addr); + wx_get_mac_addr(wx, wx->mac.perm_addr); /* reset num_rar_entries to 128 */ - wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES; - wx_init_rx_addrs(wxhw); - pci_set_master(wxhw->pdev); + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx_init_rx_addrs(wx); + pci_set_master(wx->pdev); return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h index 42476a3fe57c..a4693e006816 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h @@ -7,6 +7,7 @@ #ifndef _NGBE_HW_H_ #define _NGBE_HW_H_ -int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw); -int ngbe_reset_hw(struct ngbe_hw *hw); +int ngbe_eeprom_chksum_hostif(struct wx *wx); +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi); +int ngbe_reset_hw(struct wx *wx); #endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index f0b24366da18..5b564d348c09 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -9,12 +9,16 @@ #include <linux/aer.h> #include <linux/etherdevice.h> #include <net/ip.h> +#include <linux/phy.h> #include "../libwx/wx_type.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_lib.h" #include "ngbe_type.h" +#include "ngbe_mdio.h" #include "ngbe_hw.h" -#include "ngbe.h" +#include "ngbe_ethtool.h" + char ngbe_driver_name[] = "ngbe"; /* ngbe_pci_tbl - PCI Device ID Table @@ -39,70 +43,27 @@ static const struct pci_device_id ngbe_pci_tbl[] = { { .device = 0 } }; -static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, u8 *addr) -{ - struct ngbe_hw *hw = &adapter->hw; - - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].pools = 1ULL; - adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | - NGBE_MAC_STATE_IN_USE); - wx_set_rar(&hw->wxhw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].pools, - WX_PSR_MAC_SWC_AD_H_AV); -} - /** * ngbe_init_type_code - Initialize the shared code - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure **/ -static void ngbe_init_type_code(struct ngbe_hw *hw) +static void ngbe_init_type_code(struct wx *wx) { int wol_mask = 0, ncsi_mask = 0; - struct wx_hw *wxhw = &hw->wxhw; - u16 type_mask = 0; + u16 type_mask = 0, val; - wxhw->mac.type = wx_mac_em; - type_mask = (u16)(wxhw->subsystem_device_id & NGBE_OEM_MASK); - ncsi_mask = wxhw->subsystem_device_id & NGBE_NCSI_MASK; - wol_mask = wxhw->subsystem_device_id &
NGBE_WOL_MASK; - - switch (type_mask) { - case NGBE_SUBID_M88E1512_SFP: - case NGBE_SUBID_LY_M88E1512_SFP: - hw->phy.type = ngbe_phy_m88e1512_sfi; - break; - case NGBE_SUBID_M88E1512_RJ45: - hw->phy.type = ngbe_phy_m88e1512; - break; - case NGBE_SUBID_M88E1512_MIX: - hw->phy.type = ngbe_phy_m88e1512_unknown; - break; - case NGBE_SUBID_YT8521S_SFP: - case NGBE_SUBID_YT8521S_SFP_GPIO: - case NGBE_SUBID_LY_YT8521S_SFP: - hw->phy.type = ngbe_phy_yt8521s_sfi; - break; - case NGBE_SUBID_INTERNAL_YT8521S_SFP: - case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: - hw->phy.type = ngbe_phy_internal_yt8521s_sfi; - break; - case NGBE_SUBID_RGMII_FPGA: - case NGBE_SUBID_OCP_CARD: - fallthrough; - default: - hw->phy.type = ngbe_phy_internal; - break; - } + wx->mac.type = wx_mac_em; + type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK); + ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK; + wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK; - if (hw->phy.type == ngbe_phy_internal || - hw->phy.type == ngbe_phy_internal_yt8521s_sfi) - hw->mac_type = ngbe_mac_type_mdi; - else - hw->mac_type = ngbe_mac_type_rgmii; + val = rd32(wx, WX_CFG_PORT_ST); + wx->mac_type = (val & BIT(7)) >> 7 ? + em_mac_type_rgmii : + em_mac_type_mdi; - hw->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; - hw->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || + wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; + wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0; switch (type_mask) { @@ -110,31 +71,31 @@ static void ngbe_init_type_code(struct ngbe_hw *hw) case NGBE_SUBID_LY_M88E1512_SFP: case NGBE_SUBID_YT8521S_SFP_GPIO: case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: - hw->gpio_ctrl = 1; + wx->gpio_ctrl = 1; break; default: - hw->gpio_ctrl = 0; + wx->gpio_ctrl = 0; break; } } /** - * ngbe_init_rss_key - Initialize adapter RSS key - * @adapter: device handle + * ngbe_init_rss_key - Initialize wx RSS key + * @wx: device handle * * Allocates and initializes the RSS key if it is not allocated. 
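 *
 * The key is WX_RSS_KEY_SIZE (40) bytes, generated once with
 * netdev_rss_key_fill() and kept for the lifetime of the device.
 * A minimal sketch of the equivalent open-coded allocation:
 *
 *	wx->rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
 *	if (wx->rss_key)
 *		netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);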
**/ -static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter) +static inline int ngbe_init_rss_key(struct wx *wx) { u32 *rss_key; - if (!adapter->rss_key) { - rss_key = kzalloc(NGBE_RSS_KEY_SIZE, GFP_KERNEL); + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); if (unlikely(!rss_key)) return -ENOMEM; - netdev_rss_key_fill(rss_key, NGBE_RSS_KEY_SIZE); - adapter->rss_key = rss_key; + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; } return 0; @@ -142,72 +103,263 @@ static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter) /** * ngbe_sw_init - Initialize general software structures - * @adapter: board private structure to initialize + * @wx: board private structure to initialize **/ -static int ngbe_sw_init(struct ngbe_adapter *adapter) +static int ngbe_sw_init(struct wx *wx) { - struct pci_dev *pdev = adapter->pdev; - struct ngbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct pci_dev *pdev = wx->pdev; u16 msix_count = 0; int err = 0; - wxhw->hw_addr = adapter->io_addr; - wxhw->pdev = pdev; + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; + wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; + wx->mac.mcft_size = NGBE_MC_TBL_SIZE; + wx->mac.rx_pb_size = NGBE_RX_PB_SIZE; + wx->mac.tx_pb_size = NGBE_TDB_PB_SZ; /* PCI config space info */ - err = wx_sw_init(wxhw); + err = wx_sw_init(wx); if (err < 0) { - netif_err(adapter, probe, adapter->netdev, - "Read of internal subsystem device id failed\n"); + wx_err(wx, "read of internal subsystem device id failed\n"); return err; } /* mac type, phy type , oem type */ - ngbe_init_type_code(hw); + ngbe_init_type_code(wx); - wxhw->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; - wxhw->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; - wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES; /* Set common capability flags and settings */ - adapter->max_q_vectors = NGBE_MAX_MSIX_VECTORS; - - err = wx_get_pcie_msix_counts(wxhw, &msix_count, NGBE_MAX_MSIX_VECTORS); + wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS; + err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS); if (err) dev_err(&pdev->dev, "Do not support MSI-X\n"); - wxhw->mac.max_msix_vectors = msix_count; + wx->mac.max_msix_vectors = msix_count; - adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries, - sizeof(struct ngbe_mac_addr), - GFP_KERNEL); - if (!adapter->mac_table) { - dev_err(&pdev->dev, "mac_table allocation failed: %d\n", err); - return -ENOMEM; - } - - if (ngbe_init_rss_key(adapter)) + if (ngbe_init_rss_key(wx)) return -ENOMEM; /* enable itr by default in dynamic mode */ - adapter->rx_itr_setting = 1; - adapter->tx_itr_setting = 1; + wx->rx_itr_setting = 1; + wx->tx_itr_setting = 1; /* set default ring sizes */ - adapter->tx_ring_count = NGBE_DEFAULT_TXD; - adapter->rx_ring_count = NGBE_DEFAULT_RXD; + wx->tx_ring_count = NGBE_DEFAULT_TXD; + wx->rx_ring_count = NGBE_DEFAULT_RXD; /* set default work limits */ - adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; - adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; + wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; return 0; } -static void ngbe_down(struct ngbe_adapter *adapter) +/** + * ngbe_irq_enable - Enable default interrupt generation settings + * @wx: board private structure + * @queues: enable all queues interrupts + **/ +static void ngbe_irq_enable(struct wx *wx, bool queues) { - netif_carrier_off(adapter->netdev); - netif_tx_disable(adapter->netdev); -}; + u32 mask; + + /* enable misc interrupt */ + mask 
= NGBE_PX_MISC_IEN_MASK; + + wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0); + wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1); + wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0); + wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3); + + wr32(wx, WX_PX_MISC_IEN, mask); + + /* mask interrupt */ + if (queues) + wx_intr_enable(wx, NGBE_INTR_ALL); + else + wx_intr_enable(wx, NGBE_INTR_MISC(wx)); +} + +/** + * ngbe_intr - msi/legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + struct pci_dev *pdev; + u32 eicr; + + q_vector = wx->q_vector[0]; + pdev = wx->pdev; + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * the interrupt that we masked before the EICR read. + */ + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + wx->isb_mem[WX_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +{ + struct wx *wx = data; + + /* re-enable the original interrupt state, no lsc, no queues */ + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @wx: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ngbe_request_msix_irqs(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + int vector, err; + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, entry->entry); + else + /* skip this unused q_vector */ + continue; + + err = request_irq(entry->vector, wx_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", + q_vector->name, err); + goto free_queue_irqs; + } + } + + err = request_irq(wx->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, wx); + + if (err) { + wx_err(wx, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(wx->msix_entries[vector].vector, + wx->q_vector[vector]); + } + wx_reset_interrupt_capability(wx); + return err; +} + +/** + * ngbe_request_irq - initialize interrupts + * @wx: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
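 *
 * Selection order below: per-queue MSI-X vectors first, then a
 * single MSI vector, then legacy INTx. Only the INTx path passes
 * IRQF_SHARED, since that interrupt line may be shared with other
 * devices; MSI and MSI-X vectors are exclusive to this device.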
+ **/ +static int ngbe_request_irq(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + struct pci_dev *pdev = wx->pdev; + int err; + + if (pdev->msix_enabled) + err = ngbe_request_msix_irqs(wx); + else if (pdev->msi_enabled) + err = request_irq(pdev->irq, ngbe_intr, 0, + netdev->name, wx); + else + err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED, + netdev->name, wx); + + if (err) + wx_err(wx, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_disable_device(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + u32 i; + + /* disable all enabled rx queues */ + for (i = 0; i < wx->num_rx_queues; i++) + /* this call also flushes the previous write */ + wx_disable_rx_queue(wx, wx->rx_ring[i]); + /* disable receives */ + wx_disable_rx(wx); + wx_napi_disable_all(wx); + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + if (wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, false); + wx_irq_disable(wx); + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < wx->num_tx_queues; i++) { + u8 reg_idx = wx->tx_ring[i]->reg_idx; + + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); + } +} + +static void ngbe_down(struct wx *wx) +{ + phy_stop(wx->phydev); + ngbe_disable_device(wx); + wx_clean_all_tx_rings(wx); + wx_clean_all_rx_rings(wx); +} + +static void ngbe_up(struct wx *wx) +{ + wx_configure_vectors(wx); + + /* make sure to complete pre-operations */ + smp_mb__before_atomic(); + wx_napi_enable_all(wx); + /* enable transmits */ + netif_tx_start_all_queues(wx->netdev); + + /* clear any pending interrupts, may auto mask */ + rd32(wx, WX_PX_IC); + rd32(wx, WX_PX_MISC_IC); + ngbe_irq_enable(wx, true); + if (wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, true); + + phy_start(wx->phydev); +} /** * ngbe_open - Called when a network interface is made active @@ -220,13 +372,43 @@ static void ngbe_down(struct ngbe_adapter *adapter) **/ static int ngbe_open(struct net_device *netdev) { - struct ngbe_adapter *adapter = netdev_priv(netdev); - struct ngbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct wx *wx = netdev_priv(netdev); + int err; + + wx_control_hw(wx, true); + + err = wx_setup_resources(wx); + if (err) + return err; + + wx_configure(wx); + + err = ngbe_request_irq(wx); + if (err) + goto err_free_resources; + + err = ngbe_phy_connect(wx); + if (err) + goto err_free_irq; + + err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); + if (err) + goto err_dis_phy; + + err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); + if (err) + goto err_dis_phy; - wx_control_hw(wxhw, true); + ngbe_up(wx); return 0; +err_dis_phy: + phy_disconnect(wx->phydev); +err_free_irq: + wx_free_irq(wx); +err_free_resources: + wx_free_resources(wx); + return err; } /** @@ -242,66 +424,40 @@ static int ngbe_open(struct net_device *netdev) **/ static int ngbe_close(struct net_device *netdev) { - struct ngbe_adapter *adapter = netdev_priv(netdev); - - ngbe_down(adapter); - wx_control_hw(&adapter->hw.wxhw, false); - - return 0; -} - -static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - return NETDEV_TX_OK; -} - -/** - * ngbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ngbe_set_mac(struct net_device *netdev, void *p) -{ - struct ngbe_adapter *adapter = netdev_priv(netdev); - struct wx_hw *wxhw = 
&adapter->hw.wxhw; - struct sockaddr *addr = p; + struct wx *wx = netdev_priv(netdev); - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - eth_hw_addr_set(netdev, addr->sa_data); - memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len); - - ngbe_mac_set_default_filter(adapter, wxhw->mac.addr); + ngbe_down(wx); + wx_free_irq(wx); + wx_free_resources(wx); + phy_disconnect(wx->phydev); + wx_control_hw(wx, false); return 0; } static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + netdev = wx->netdev; netif_device_detach(netdev); rtnl_lock(); if (netif_running(netdev)) - ngbe_down(adapter); + ngbe_down(wx); rtnl_unlock(); - wx_control_hw(&adapter->hw.wxhw, false); + wx_control_hw(wx, false); pci_disable_device(pdev); } static void ngbe_shutdown(struct pci_dev *pdev) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); bool wake; - wake = !!adapter->wol; + wake = !!wx->wol; ngbe_dev_shutdown(pdev, &wake); @@ -314,9 +470,11 @@ static void ngbe_shutdown(struct pci_dev *pdev) static const struct net_device_ops ngbe_netdev_ops = { .ndo_open = ngbe_open, .ndo_stop = ngbe_close, - .ndo_start_xmit = ngbe_xmit_frame, + .ndo_start_xmit = wx_xmit_frame, + .ndo_set_rx_mode = wx_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ngbe_set_mac, + .ndo_set_mac_address = wx_set_mac, + .ndo_get_stats64 = wx_get_stats64, }; /** @@ -326,18 +484,16 @@ static const struct net_device_ops ngbe_netdev_ops = { * * Returns 0 on success, negative on failure * - * ngbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, + * ngbe_probe initializes a wx identified by a pci_dev structure. + * The OS initialization, configuring of the wx private structure, * and a hardware reset occur.
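 *
 * Ordering note: register_netdev() is called last, after
 * ngbe_sw_init(), the flash/EEPROM checks, ngbe_reset_hw(),
 * wx_init_interrupt_scheme() and ngbe_mdio_init(), so the netdev
 * is never visible to user space before the hardware is ready.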
**/ static int ngbe_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { - struct ngbe_adapter *adapter = NULL; - struct ngbe_hw *hw = NULL; - struct wx_hw *wxhw = NULL; struct net_device *netdev; u32 e2rom_cksum_cap = 0; + struct wx *wx = NULL; static int func_nums; u16 e2rom_ver = 0; u32 etrack_id = 0; @@ -368,7 +524,7 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_master(pdev); netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct ngbe_adapter), + sizeof(struct wx), NGBE_MAX_TX_QUEUES, NGBE_MAX_RX_QUEUES); if (!netdev) { @@ -378,63 +534,74 @@ static int ngbe_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - wxhw = &hw->wxhw; - adapter->msg_enable = BIT(3) - 1; - - adapter->io_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!adapter->io_addr) { + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + wx->msg_enable = BIT(3) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { err = -EIO; goto err_pci_release_regions; } + wx->driver_name = ngbe_driver_name; + ngbe_set_ethtool_ops(netdev); netdev->netdev_ops = &ngbe_netdev_ops; netdev->features |= NETIF_F_HIGHDMA; + netdev->features = NETIF_F_SG; - adapter->bd_number = func_nums; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_RXALL; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + wx->bd_number = func_nums; /* setup the private structure */ - err = ngbe_sw_init(adapter); + err = ngbe_sw_init(wx); if (err) goto err_free_mac_table; /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PERST); + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); if (err) goto err_free_mac_table; - err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PWRRST); + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST); if (err) goto err_free_mac_table; - err = wx_mng_present(wxhw); + err = wx_mng_present(wx); if (err) { dev_err(&pdev->dev, "Management capability is not present\n"); goto err_free_mac_table; } - err = ngbe_reset_hw(hw); + err = ngbe_reset_hw(wx); if (err) { dev_err(&pdev->dev, "HW Init failed: %d\n", err); goto err_free_mac_table; } - if (wxhw->bus.func == 0) { - wr32(wxhw, NGBE_CALSUM_CAP_STATUS, 0x0); - wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + if (wx->bus.func == 0) { + wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0); } else { - e2rom_cksum_cap = rd32(wxhw, NGBE_CALSUM_CAP_STATUS); - saved_ver = rd32(wxhw, NGBE_EEPROM_VERSION_STORE_REG); + e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS); + saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG); } - wx_init_eeprom_params(wxhw); - if (wxhw->bus.func == 0 || e2rom_cksum_cap == 0) { + wx_init_eeprom_params(wx); + if (wx->bus.func == 0 || e2rom_cksum_cap == 0) { /* make sure the EEPROM is ready */ - err = ngbe_eeprom_chksum_hostif(hw); + err = ngbe_eeprom_chksum_hostif(wx); if (err) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); err = -EIO; @@ -442,14 +609,14 @@ static int ngbe_probe(struct pci_dev *pdev, } } - adapter->wol = 0; - if (hw->wol_enabled) - 
adapter->wol = NGBE_PSR_WKUP_CTL_MAG; + wx->wol = 0; + if (wx->wol_enabled) + wx->wol = NGBE_PSR_WKUP_CTL_MAG; - hw->wol_enabled = !!(adapter->wol); - wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol); + wx->wol_enabled = !!(wx->wol); + wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, adapter->wol); + device_set_wakeup_enable(&pdev->dev, wx->wol); /* Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom @@ -457,37 +624,50 @@ static int ngbe_probe(struct pci_dev *pdev, if (saved_ver) { etrack_id = saved_ver; } else { - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, &e2rom_ver); etrack_id = e2rom_ver << 16; - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, &e2rom_ver); etrack_id |= e2rom_ver; - wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); } + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), + "0x%08x", etrack_id); + + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); - eth_hw_addr_set(netdev, wxhw->mac.perm_addr); - ngbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr); + err = wx_init_interrupt_scheme(wx); + if (err) + goto err_free_mac_table; + + /* phy Interface Configuration */ + err = ngbe_mdio_init(wx); + if (err) + goto err_clear_interrupt_scheme; err = register_netdev(netdev); if (err) goto err_register; - pci_set_drvdata(pdev, adapter); + pci_set_drvdata(pdev, wx); - netif_info(adapter, probe, netdev, + netif_info(wx, probe, netdev, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - hw->phy.type == ngbe_phy_internal ? "Internal" : "External"); - netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr); + wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); + netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); return 0; err_register: - wx_control_hw(wxhw, false); + wx_control_hw(wx, false); +err_clear_interrupt_scheme: + wx_clear_interrupt_scheme(wx); err_free_mac_table: - kfree(adapter->mac_table); + kfree(wx->mac_table); err_pci_release_regions: pci_disable_pcie_error_reporting(pdev); pci_release_selected_regions(pdev, @@ -508,15 +688,16 @@ err_pci_disable_dev: **/ static void ngbe_remove(struct pci_dev *pdev) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); struct net_device *netdev; - netdev = adapter->netdev; + netdev = wx->netdev; unregister_netdev(netdev); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(adapter->mac_table); + kfree(wx->mac_table); + wx_clear_interrupt_scheme(wx); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c new file mode 100644 index 000000000000..c9ddbbc3fa4f --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "ngbe_type.h" +#include "ngbe_mdio.h" + +static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + if (phy_addr != 0) + return 0xffff; + return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum)); +} + +static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + if (phy_addr == 0) + wr32(wx, NGBE_PHY_CONFIG(regnum), value); + return 0; +} + +static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(wx, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(wx, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 val, command; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(devnum); + wr32(wx, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) { + wx_err(wx, "Mdio read c45 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret, command; + u16 val; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(devnum); + wr32(wx, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if 
(ret) + wx_err(wx, "Mdio write c45 command did not complete.\n"); + + return ret; +} + +static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + u16 phy_data; + + if (wx->mac_type == em_mac_type_mdi) + phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); + else + phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + + return phy_data; +} + +static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, + int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret; + + if (wx->mac_type == em_mac_type_mdi) + ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); + else + ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + + return ret; +} + +static void ngbe_handle_link_change(struct net_device *dev) +{ + struct wx *wx = netdev_priv(dev); + struct phy_device *phydev; + u32 lan_speed, reg; + + phydev = wx->phydev; + if (!(wx->link != phydev->link || + wx->speed != phydev->speed || + wx->duplex != phydev->duplex)) + return; + + wx->link = phydev->link; + wx->speed = phydev->speed; + wx->duplex = phydev->duplex; + switch (phydev->speed) { + case SPEED_10: + lan_speed = 0; + break; + case SPEED_100: + lan_speed = 1; + break; + case SPEED_1000: + default: + lan_speed = 2; + break; + } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); + + if (phydev->link) { + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + /* Re configure MAC RX */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + } + phy_print_status(phydev); +} + +int ngbe_phy_connect(struct wx *wx) +{ + int ret; + + ret = phy_connect_direct(wx->netdev, + wx->phydev, + ngbe_handle_link_change, + PHY_INTERFACE_MODE_RGMII_ID); + if (ret) { + wx_err(wx, "PHY connect failed.\n"); + return ret; + } + + return 0; +} + +static void ngbe_phy_fixup(struct wx *wx) +{ + struct phy_device *phydev = wx->phydev; + struct ethtool_eee eee; + + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + if (wx->mac_type != em_mac_type_mdi) + return; + /* disable EEE, internal phy does not support eee */ + memset(&eee, 0, sizeof(eee)); + phy_ethtool_set_eee(phydev, &eee); +} + +int ngbe_mdio_init(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + struct mii_bus *mii_bus; + int ret; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "ngbe_mii_bus"; + mii_bus->read = ngbe_phy_read_reg_c22; + mii_bus->write = ngbe_phy_write_reg_c22; + mii_bus->phy_mask = GENMASK(31, 4); + mii_bus->parent = &pdev->dev; + mii_bus->priv = wx; + + if (wx->mac_type == em_mac_type_rgmii) { + mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; + mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + } + + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", + (pdev->bus->number << 8) | pdev->devfn); + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) + return ret; + + wx->phydev = phy_find_first(mii_bus); + if (!wx->phydev) + return -ENODEV; + + phy_attached_info(wx->phydev); + ngbe_phy_fixup(wx); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h 
b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h new file mode 100644 index 000000000000..0a6400dd89c4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + */ + +#ifndef _NGBE_MDIO_H_ +#define _NGBE_MDIO_H_ + +int ngbe_phy_connect(struct wx *wx); +int ngbe_mdio_init(struct wx *wx); +#endif /* _NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 39f6c03f1a54..a2351349785e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -49,7 +49,6 @@ #define NGBE_SPI_ILDR_STATUS 0x10120 #define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ #define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ -#define NGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */ /* Checksum and EEPROM pointers */ #define NGBE_CALSUM_COMMAND 0xE9 @@ -60,6 +59,25 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E +/* mdio access */ +#define NGBE_MSCA 0x11200 +#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v) +#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) +#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) +#define NGBE_MSCC 0x11204 +#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) + +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; + +#define NGBE_MSCC_SADDR BIT(18) +#define NGBE_MSCC_BUSY BIT(22) +#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) + /* Media-dependent registers. */ #define NGBE_MDIO_CLAUSE_SELECT 0x11220 @@ -72,6 +90,24 @@ #define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ #define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +/* Extended Interrupt Enable Set */ +#define NGBE_PX_MISC_IEN_DEV_RST BIT(10) +#define NGBE_PX_MISC_IEN_ETH_LK BIT(18) +#define NGBE_PX_MISC_IEN_INT_ERR BIT(20) +#define NGBE_PX_MISC_IEN_GPIO BIT(26) +#define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IEN_GPIO) + +#define NGBE_INTR_ALL 0x1FF +#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) + +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) +#define NGBE_CFG_LAN_SPEED 0x14440 +#define NGBE_CFG_PORT_ST 0x14404 + /* Wake up registers */ #define NGBE_PSR_WKUP_CTL 0x15B80 /* Wake Up Filter Control Bit */ @@ -90,50 +126,30 @@ #define NGBE_FW_CMD_ST_PASS 0x80658383 #define NGBE_FW_CMD_ST_FAIL 0x70657376 -enum ngbe_phy_type { - ngbe_phy_unknown = 0, - ngbe_phy_none, - ngbe_phy_internal, - ngbe_phy_m88e1512, - ngbe_phy_m88e1512_sfi, - ngbe_phy_m88e1512_unknown, - ngbe_phy_yt8521s, - ngbe_phy_yt8521s_sfi, - ngbe_phy_internal_yt8521s_sfi, - ngbe_phy_generic -}; +#define NGBE_MAX_FDIR_INDICES 7 -enum ngbe_media_type { - ngbe_media_type_unknown = 0, - ngbe_media_type_fiber, - ngbe_media_type_copper, - ngbe_media_type_backplane, -}; - -enum ngbe_mac_type { - ngbe_mac_type_unknown = 0, - ngbe_mac_type_mdi, - ngbe_mac_type_rgmii -}; +#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) +#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) -struct ngbe_phy_info { - enum ngbe_phy_type type; - enum ngbe_media_type media_type; +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 +#define NGBE_MAX_MSIX_VECTORS 0x09 +#define NGBE_RAR_ENTRIES 32 +#define 
NGBE_RX_PB_SIZE 42 +#define NGBE_MC_TBL_SIZE 128 +#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */ +#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ - u32 addr; - u32 id; +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 - bool reset_if_overtemp; +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 -}; - -struct ngbe_hw { - struct wx_hw wxhw; - struct ngbe_phy_info phy; - enum ngbe_mac_type mac_type; +extern char ngbe_driver_name[]; - bool wol_enabled; - bool ncsi_enabled; - bool gpio_ctrl; -}; #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 78484c58b78b..6db14a2cb2d0 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o txgbe-objs := txgbe_main.o \ - txgbe_hw.o + txgbe_hw.o \ + txgbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h deleted file mode 100644 index 19e61377bd00..000000000000 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ - -#ifndef _TXGBE_H_ -#define _TXGBE_H_ - -#define TXGBE_MAX_FDIR_INDICES 63 - -#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) - -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 - -struct txgbe_mac_addr { - u8 addr[ETH_ALEN]; - u16 state; /* bitmask */ - u64 pools; -}; - -#define TXGBE_MAC_STATE_DEFAULT 0x1 -#define TXGBE_MAC_STATE_MODIFIED 0x2 -#define TXGBE_MAC_STATE_IN_USE 0x4 - -/* board specific private data structure */ -struct txgbe_adapter { - u8 __iomem *io_addr; /* Mainly for iounmap use */ - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in txgbe_type.h */ - struct txgbe_hw hw; - u16 msg_enable; - struct txgbe_mac_addr *mac_table; - char eeprom_id[32]; -}; - -extern char txgbe_driver_name[]; - -#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c new file mode 100644 index 000000000000..d914e9a05404 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include <linux/phylink.h> +#include <linux/netdevice.h> + +#include "../libwx/wx_ethtool.h" +#include "txgbe_ethtool.h" + +static const struct ethtool_ops txgbe_ethtool_ops = { + .get_drvinfo = wx_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +void txgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &txgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h new file mode 100644 index 000000000000..ace1b3571012 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_ETHTOOL_H_ +#define _TXGBE_ETHTOOL_H_ + +void txgbe_set_ethtool_ops(struct net_device *netdev); + +#endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 167f7ff73192..ebc46f3be056 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -12,70 +12,67 @@ #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_hw.h" -#include "txgbe.h" /** * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * * Inits the thermal sensor thresholds according to the NVM map * and save off the threshold and location values into mac.thermal_sensor_data **/ -static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +static void txgbe_init_thermal_sensor_thresh(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - struct wx_thermal_sensor_data *data = &wxhw->mac.sensor; + struct wx_thermal_sensor_data *data = &wx->mac.sensor; memset(data, 0, sizeof(struct wx_thermal_sensor_data)); /* Only support thermal sensors attached to SP physical port 0 */ - if (wxhw->bus.func) + if (wx->bus.func) return; - wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); - wr32(wxhw, WX_TS_INT_EN, + wr32(wx, WX_TS_INT_EN, WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN); - wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA); + wr32(wx, WX_TS_EN, WX_TS_EN_ENA); data->alarm_thresh = 100; - wr32(wxhw, WX_TS_ALARM_THRE, 677); + wr32(wx, WX_TS_ALARM_THRE, 677); data->dalarm_thresh = 90; - wr32(wxhw, WX_TS_DALARM_THRE, 614); + wr32(wx, WX_TS_DALARM_THRE, 614); } /** * txgbe_read_pba_string - Reads part number string from EEPROM - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
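 *
 * Usage sketch (hypothetical caller; as checked below, the buffer
 * must hold at least 11 bytes for the legacy hex encoding, or
 * length * 2 - 1 bytes for string-format NVMs):
 *
 *	u8 part_str[32];
 *
 *	if (!txgbe_read_pba_string(wx, part_str, sizeof(part_str)))
 *		wx_dbg(wx, "PBA: %s\n", part_str);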
**/ -int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) { u16 pba_ptr, offset, length, data; - struct wx_hw *wxhw = &hw->wxhw; int ret_val; if (!pba_num) { - wx_err(wxhw, "PBA string buffer was null\n"); + wx_err(wx, "PBA string buffer was null\n"); return -EINVAL; } - ret_val = wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, &data); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } - ret_val = wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } @@ -84,11 +81,11 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) * and we can decode it into an ascii string */ if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wxhw, "NVM PBA number is not stored as string\n"); + wx_err(wx, "NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { - wx_err(wxhw, "PBA string buffer too small\n"); + wx_err(wx, "PBA string buffer too small\n"); return -ENOMEM; } @@ -118,20 +115,20 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) return 0; } - ret_val = wx_read_ee_hostif(wxhw, pba_ptr, &length); + ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { - wx_err(wxhw, "NVM PBA number section invalid length\n"); + wx_err(wx, "NVM PBA number section invalid length\n"); return -EINVAL; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wxhw, "PBA string buffer too small\n"); + wx_err(wx, "PBA string buffer too small\n"); return -ENOMEM; } @@ -140,9 +137,9 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) length--; for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wxhw, pba_ptr + offset, &data); + ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); @@ -155,14 +152,13 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @checksum: pointer to checksum * * Returns a negative error code on error **/ -static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) +static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) { - struct wx_hw *wxhw = &hw->wxhw; u16 *eeprom_ptrs = NULL; u32 buffer_size = 0; u16 *buffer = NULL; @@ -170,7 +166,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) int status; u16 i; - wx_init_eeprom_params(wxhw); + wx_init_eeprom_params(wx); if (!buffer) { eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16), @@ -178,11 +174,11 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) if (!eeprom_ptrs) return -ENOMEM; /* Read pointer area */ -
status = wx_read_ee_hostif_buffer(wxhw, 0, + status = wx_read_ee_hostif_buffer(wx, 0, TXGBE_EEPROM_LAST_WORD, eeprom_ptrs); if (status != 0) { - wx_err(wxhw, "Failed to read EEPROM image\n"); + wx_err(wx, "Failed to read EEPROM image\n"); kvfree(eeprom_ptrs); return status; } @@ -194,7 +190,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) } for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) - if (i != wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) *checksum += local_buffer[i]; if (eeprom_ptrs) @@ -210,15 +206,14 @@ /** * txgbe_validate_eeprom_checksum - Validate EEPROM checksum - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val) +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val) { - struct wx_hw *wxhw = &hw->wxhw; u16 read_checksum = 0; u16 checksum; int status; @@ -227,18 +222,18 @@ * not continue or we could be in for a very long wait while every * EEPROM read fails */ - status = wx_read_ee_hostif(wxhw, 0, &checksum); + status = wx_read_ee_hostif(wx, 0, &checksum); if (status) { - wx_err(wxhw, "EEPROM read failed\n"); + wx_err(wx, "EEPROM read failed\n"); return status; } checksum = 0; - status = txgbe_calc_eeprom_checksum(hw, &checksum); + status = txgbe_calc_eeprom_checksum(wx, &checksum); if (status != 0) return status; - status = wx_read_ee_hostif(wxhw, wxhw->eeprom.sw_region_offset + + status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM, &read_checksum); if (status != 0) return status; @@ -248,7 +243,7 @@ */ if (read_checksum != checksum) { status = -EIO; - wx_err(wxhw, "Invalid EEPROM checksum\n"); + wx_err(wx, "Invalid EEPROM checksum\n"); } /* If the user cares, return the calculated checksum */ @@ -258,55 +253,52 @@ return status; } -static void txgbe_reset_misc(struct txgbe_hw *hw) +static void txgbe_reset_misc(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - - wx_reset_misc(wxhw); - txgbe_init_thermal_sensor_thresh(hw); + wx_reset_misc(wx); + txgbe_init_thermal_sensor_thresh(wx); } /** * txgbe_reset_hw - Perform hardware reset - * @hw: pointer to hardware structure + * @wx: pointer to wx structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, performs a PHY reset, and performs a link (MAC) * reset. **/
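/* The NCSI/WoL test in txgbe_reset_hw() below reappears verbatim in
 * txgbe_disable_device() later in this patch, so a sketch of the intent
 * may help: when either capability is advertised in the subsystem device
 * ID, firmware keeps servicing the MAC and the host interface reset is
 * skipped. wx_fw_stays_alive() is a hypothetical name for illustration,
 * not a symbol introduced by this patch.
 */
static bool wx_fw_stays_alive(struct wx *wx)
{
	u16 ssid = wx->subsystem_device_id;

	return (ssid & WX_NCSI_MASK) == WX_NCSI_SUP ||
	       (ssid & WX_WOL_MASK) == WX_WOL_SUP;
}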
-int txgbe_reset_hw(struct txgbe_hw *hw) +int txgbe_reset_hw(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; int status; /* Call adapter stop to disable tx/rx and clear interrupts */ - status = wx_stop_adapter(wxhw); + status = wx_stop_adapter(wx); if (status != 0) return status; - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) - wx_reset_hostif(wxhw); + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) + wx_reset_hostif(wx); usleep_range(10, 100); - status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func)); + status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); if (status != 0) return status; - txgbe_reset_misc(hw); + txgbe_reset_misc(wx); /* Store the permanent mac address */ - wx_get_mac_addr(wxhw, wxhw->mac.perm_addr); + wx_get_mac_addr(wx, wx->mac.perm_addr); /* Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ - wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx_init_rx_addrs(wxhw); + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx_init_rx_addrs(wx); - pci_set_master(wxhw->pdev); + pci_set_master(wx->pdev); return 0; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index 6a751a69177b..e82f65dff8a6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -4,8 +4,8 @@ #ifndef _TXGBE_HW_H_ #define _TXGBE_HW_H_ -int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size); -int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val); -int txgbe_reset_hw(struct txgbe_hw *hw); +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); +int txgbe_reset_hw(struct wx *wx); #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 36780e7f05b7..6c0a98230557 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -11,10 +11,11 @@ #include <net/ip.h> #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_hw.h" -#include "txgbe.h" +#include "txgbe_ethtool.h" char txgbe_driver_name[] = "txgbe"; @@ -35,26 +36,26 @@ static const struct pci_device_id txgbe_pci_tbl[] = { #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -static void txgbe_check_minimum_link(struct txgbe_adapter *adapter) +static void txgbe_check_minimum_link(struct wx *wx) { struct pci_dev *pdev; - pdev = adapter->pdev; + pdev = wx->pdev; pcie_print_link_status(pdev); } /** * txgbe_enumerate_functions - Get the number of ports this device has - * @adapter: adapter structure + * @wx: wx structure * * This function enumerates the physical functions co-located on a single slot, * in order to determine how many ports a device has. This is most useful in * determining the required GT/s of PCIe bandwidth necessary for optimal * performance. **/
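/* A minimal sketch of the sibling-function walk used below, assuming all
 * ports of one adapter hang off the same PCI bus; count_bus_functions()
 * is a hypothetical helper for illustration (the real routine also
 * filters by supported device IDs before counting). Probe later turns
 * this count into an expected bandwidth of 10 GT/s per port.
 */
static int count_bus_functions(struct pci_dev *pdev)
{
	struct pci_dev *entry;
	int physfns = 0;

	list_for_each_entry(entry, &pdev->bus->devices, bus_list)
		physfns++;

	return physfns;
}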
-static int txgbe_enumerate_functions(struct txgbe_adapter *adapter) +static int txgbe_enumerate_functions(struct wx *wx) { - struct pci_dev *entry, *pdev = adapter->pdev; + struct pci_dev *entry, *pdev = wx->pdev; int physfns = 0; list_for_each_entry(entry, &pdev->bus->devices, bus_list) { @@ -73,196 +74,299 @@ static int txgbe_enumerate_functions(struct txgbe_adapter *adapter) return physfns; } -static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) +/** + * txgbe_irq_enable - Enable default interrupt generation settings + * @wx: pointer to private structure + * @queues: enable irqs for queues + **/ +static void txgbe_irq_enable(struct wx *wx, bool queues) { - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; - int i; - - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { - if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { - wx_set_rar(wxhw, i, - adapter->mac_table[i].addr, - adapter->mac_table[i].pools, - WX_PSR_MAC_SWC_AD_H_AV); - } else { - wx_clear_rar(wxhw, i); - } - adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); - } - } + /* unmask interrupt */ + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + if (queues) + wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } -/* this function destroys the first RAR entry */ -static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, - u8 *addr) +/** + * txgbe_intr - MSI/legacy mode interrupt handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t txgbe_intr(int __always_unused irq, void *data) { - struct wx_hw *wxhw = &adapter->hw.wxhw; - - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].pools = 1ULL; - adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | - TXGBE_MAC_STATE_IN_USE); - wx_set_rar(wxhw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].pools, - WX_PSR_MAC_SWC_AD_H_AV); + struct wx_q_vector *q_vector; + struct wx *wx = data; + struct pci_dev *pdev; + u32 eicr; + + q_vector = wx->q_vector[0]; + pdev = wx->pdev; + + eicr = wx_misc_isb(wx, WX_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * re-enable the interrupt that we masked before the ICR read. + */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, true); + return IRQ_NONE; /* Not our interrupt */ + } + wx->isb_mem[WX_ISB_VEC0] = 0; + if (!(pdev->msi_enabled)) + wr32(wx, WX_PX_INTA, 1); + + wx->isb_mem[WX_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, false); + + return IRQ_HANDLED; } -static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) +static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) { - struct wx_hw *wxhw = &adapter->hw.wxhw; - u32 i; + struct wx *wx = data; - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].pools = 0; - } - txgbe_sync_mac_table(adapter); + /* re-enable the original interrupt state */ + if (netif_running(wx->netdev)) + txgbe_irq_enable(wx, false); + + return IRQ_HANDLED; } -static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) +/** + * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * @wx: board private structure + * + * Allocate MSI-X vectors and request interrupts from the kernel. + **/ +static int txgbe_request_msix_irqs(struct wx *wx) { - struct wx_hw *wxhw = &adapter->hw.wxhw; - u32 i; + struct net_device *netdev = wx->netdev; + int vector, err; + + for (vector = 0; vector < wx->num_q_vectors; vector++) { + struct wx_q_vector *q_vector = wx->q_vector[vector]; + struct msix_entry *entry = &wx->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, entry->entry); + else + /* skip this unused q_vector */ + continue; - if (is_zero_ether_addr(addr)) - return -EINVAL; - - /* search table for addr, if found, set to 0 and sync */ - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { - if (adapter->mac_table[i].pools & (1ULL << pool)) { - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - adapter->mac_table[i].pools &= ~(1ULL << pool); - txgbe_sync_mac_table(adapter); - } - return 0; + err = request_irq(entry->vector, wx_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", + q_vector->name, err); + goto free_queue_irqs; } + } - if (adapter->mac_table[i].pools != (1 << pool)) - continue; - if (!ether_addr_equal(addr, adapter->mac_table[i].addr)) - continue; + err = request_irq(wx->msix_entries[vector].vector, + txgbe_msix_other, 0, netdev->name, wx); + if (err) { + wx_err(wx, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].pools = 0; - txgbe_sync_mac_table(adapter); - return 0; +free_queue_irqs: + while (vector) { + vector--; + free_irq(wx->msix_entries[vector].vector, + wx->q_vector[vector]); } - return -ENOMEM; + wx_reset_interrupt_capability(wx); + return err; } -static void txgbe_up_complete(struct txgbe_adapter *adapter) +/** + * txgbe_request_irq - Initialize interrupts + * @wx: board private structure + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/
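/* txgbe_request_irq() below tries MSI-X first, then MSI, then a shared
 * legacy INTx line. A small sketch of how the three modes can be told
 * apart from the pci_dev flags this patch tests; irq_mode_name() is a
 * hypothetical helper for illustration only.
 */
static const char *irq_mode_name(struct pci_dev *pdev)
{
	if (pdev->msix_enabled)
		return "MSI-X";		/* per-queue vectors plus one misc vector */
	if (pdev->msi_enabled)
		return "MSI";		/* single message handled by txgbe_intr() */
	return "legacy INTx";		/* shared line, requested with IRQF_SHARED */
}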
+static int txgbe_request_irq(struct wx *wx) { - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct net_device *netdev = wx->netdev; + struct pci_dev *pdev = wx->pdev; + int err; - wx_control_hw(wxhw, true); + if (pdev->msix_enabled) + err = txgbe_request_msix_irqs(wx); + else if (pdev->msi_enabled) + err = request_irq(wx->pdev->irq, &txgbe_intr, 0, + netdev->name, wx); + else + err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, + netdev->name, wx); + + if (err) + wx_err(wx, "request_irq failed, Error %d\n", err); + + return err; } -static void txgbe_reset(struct txgbe_adapter *adapter) +static void txgbe_up_complete(struct wx *wx) { - struct net_device *netdev = adapter->netdev; - struct txgbe_hw *hw = &adapter->hw; + u32 reg; + + wx_control_hw(wx, true); + wx_configure_vectors(wx); + + /* make sure to complete pre-operations */ + smp_mb__before_atomic(); + wx_napi_enable_all(wx); + + /* clear any pending interrupts, may auto mask */ + rd32(wx, WX_PX_IC); + rd32(wx, WX_PX_MISC_IC); + txgbe_irq_enable(wx, true); + + /* Configure MAC Rx and Tx when link is up */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + reg = rd32(wx, WX_MAC_TX_CFG); + wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G); + + /* enable transmits */ + netif_tx_start_all_queues(wx->netdev); + netif_carrier_on(wx->netdev); +} + +static void txgbe_reset(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; u8 old_addr[ETH_ALEN]; int err; - err = txgbe_reset_hw(hw); + err = txgbe_reset_hw(wx); if (err != 0) - dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); + wx_err(wx, "Hardware Error: %d\n", err); /* do not flush user set addresses */ - memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); - txgbe_flush_sw_mac_table(adapter); - txgbe_mac_set_default_filter(adapter, old_addr); + memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, old_addr); } -static void txgbe_disable_device(struct txgbe_adapter *adapter) +static void txgbe_disable_device(struct wx *wx) { - struct net_device *netdev = adapter->netdev; - struct wx_hw *wxhw = &adapter->hw.wxhw; + struct net_device *netdev = wx->netdev; + u32 i; - wx_disable_pcie_master(wxhw); + wx_disable_pcie_master(wx); /* disable receives */ - wx_disable_rx(wxhw); + wx_disable_rx(wx); + + /* disable all enabled rx queues */ + for (i = 0; i < wx->num_rx_queues; i++) + /* this call also flushes the previous write */ + wx_disable_rx_queue(wx, wx->rx_ring[i]); + netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); netif_tx_disable(netdev); - if (wxhw->bus.func < 2) - wr32m(wxhw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0); + wx_irq_disable(wx); + wx_napi_disable_all(wx); + + if (wx->bus.func < 2) + wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); else - dev_err(&adapter->pdev->dev, - "%s: invalid bus lan id %d\n", - __func__, wxhw->bus.func); + wx_err(wx, "%s: invalid bus lan id %d\n", + __func__, wx->bus.func); - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac transmitter */ - wr32m(wxhw, WX_MAC_TX_CFG,
WX_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < wx->num_tx_queues; i++) { + u8 reg_idx = wx->tx_ring[i]->reg_idx; + + wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } /* Disable the Tx DMA engine */ - wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); } -static void txgbe_down(struct txgbe_adapter *adapter) +static void txgbe_down(struct wx *wx) { - txgbe_disable_device(adapter); - txgbe_reset(adapter); + txgbe_disable_device(wx); + txgbe_reset(wx); + + wx_clean_all_tx_rings(wx); + wx_clean_all_rx_rings(wx); } /** - * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) - * @adapter: board private structure to initialize + * txgbe_sw_init - Initialize general software structures (struct wx) + * @wx: board private structure to initialize **/ -static int txgbe_sw_init(struct txgbe_adapter *adapter) +static int txgbe_sw_init(struct wx *wx) { - struct pci_dev *pdev = adapter->pdev; - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + u16 msix_count = 0; int err; - wxhw->hw_addr = adapter->io_addr; - wxhw->pdev = pdev; + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; + wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; + wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; /* PCI config space info */ - err = wx_sw_init(wxhw); + err = wx_sw_init(wx); if (err < 0) { - netif_err(adapter, probe, adapter->netdev, - "read of internal subsystem device id failed\n"); + wx_err(wx, "read of internal subsystem device id failed\n"); return err; } - switch (wxhw->device_id) { + switch (wx->device_id) { case TXGBE_DEV_ID_SP1000: case TXGBE_DEV_ID_WX1820: - wxhw->mac.type = wx_mac_sp; + wx->mac.type = wx_mac_sp; break; default: - wxhw->mac.type = wx_mac_unknown; + wx->mac.type = wx_mac_unknown; break; } - wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - - adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries, - sizeof(struct txgbe_mac_addr), - GFP_KERNEL); - if (!adapter->mac_table) { - netif_err(adapter, probe, adapter->netdev, - "mac_table allocation failed\n"); - return -ENOMEM; - } + /* Set common capability flags and settings */ + wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; + err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS); + if (err) + wx_err(wx, "MSI-X is not supported\n"); + wx->mac.max_msix_vectors = msix_count; + + /* enable ITR by default in dynamic mode */ + wx->rx_itr_setting = 1; + wx->tx_itr_setting = 1; + + /* set default ring sizes */ + wx->tx_ring_count = TXGBE_DEFAULT_TXD; + wx->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* set default work limits */ + wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; + wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; return 0; } @@ -278,23 +382,53 @@ **/ static int txgbe_open(struct net_device *netdev) { - struct txgbe_adapter *adapter = netdev_priv(netdev); + struct wx *wx = netdev_priv(netdev); + int err; + + err = wx_setup_resources(wx); + if (err) + goto err_reset; + + wx_configure(wx); - txgbe_up_complete(adapter); + err = txgbe_request_irq(wx); + if (err) + goto err_free_isb; + + /* Notify the stack of the actual
queue counts. */ + err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); + if (err) + goto err_free_irq; + + err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); + if (err) + goto err_free_irq; + + txgbe_up_complete(wx); return 0; + +err_free_irq: + wx_free_irq(wx); +err_free_isb: + wx_free_isb_resources(wx); +err_reset: + txgbe_reset(wx); + + return err; } /** * txgbe_close_suspend - actions necessary to both suspend and close flows - * @adapter: the private adapter struct + * @wx: the private wx struct * * This function should contain the necessary work common to both suspending * and closing of the device. */ -static void txgbe_close_suspend(struct txgbe_adapter *adapter) +static void txgbe_close_suspend(struct wx *wx) { - txgbe_disable_device(adapter); + txgbe_disable_device(wx); + wx_free_resources(wx); } /** @@ -310,29 +444,30 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter) **/ static int txgbe_close(struct net_device *netdev) { - struct txgbe_adapter *adapter = netdev_priv(netdev); + struct wx *wx = netdev_priv(netdev); - txgbe_down(adapter); - wx_control_hw(&adapter->hw.wxhw, false); + txgbe_down(wx); + wx_free_irq(wx); + wx_free_resources(wx); + wx_control_hw(wx, false); return 0; } static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) { - struct txgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + netdev = wx->netdev; netif_device_detach(netdev); rtnl_lock(); if (netif_running(netdev)) - txgbe_close_suspend(adapter); + txgbe_close_suspend(wx); rtnl_unlock(); - wx_control_hw(wxhw, false); + wx_control_hw(wx, false); pci_disable_device(pdev); } @@ -349,45 +484,14 @@ static void txgbe_shutdown(struct pci_dev *pdev) } } -static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - return NETDEV_TX_OK; -} - -/** - * txgbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int txgbe_set_mac(struct net_device *netdev, void *p) -{ - struct txgbe_adapter *adapter = netdev_priv(netdev); - struct wx_hw *wxhw = &adapter->hw.wxhw; - struct sockaddr *addr = p; - int retval; - - retval = eth_prepare_mac_addr_change(netdev, addr); - if (retval) - return retval; - - txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0); - eth_hw_addr_set(netdev, addr->sa_data); - memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len); - - txgbe_mac_set_default_filter(adapter, wxhw->mac.addr); - - return 0; -} - static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, - .ndo_start_xmit = txgbe_xmit_frame, + .ndo_start_xmit = wx_xmit_frame, + .ndo_set_rx_mode = wx_set_rx_mode, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = txgbe_set_mac, + .ndo_set_mac_address = wx_set_mac, + .ndo_get_stats64 = wx_get_stats64, }; /** @@ -398,17 +502,15 @@ static const struct net_device_ops txgbe_netdev_ops = { * Returns 0 on success, negative on failure * * txgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, + * The OS initialization, configuring of the wx private structure, * and a hardware reset occur. 
**/ static int txgbe_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { - struct txgbe_adapter *adapter = NULL; - struct txgbe_hw *hw = NULL; - struct wx_hw *wxhw = NULL; struct net_device *netdev; int err, expected_gts; + struct wx *wx = NULL; u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; @@ -440,7 +542,7 @@ static int txgbe_probe(struct pci_dev *pdev, pci_set_master(pdev); netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct txgbe_adapter), + sizeof(struct wx), TXGBE_MAX_TX_QUEUES, TXGBE_MAX_RX_QUEUES); if (!netdev) { @@ -450,81 +552,96 @@ static int txgbe_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - wxhw = &hw->wxhw; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - adapter->io_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!adapter->io_addr) { + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + + wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { err = -EIO; goto err_pci_release_regions; } + wx->driver_name = txgbe_driver_name; + txgbe_set_ethtool_ops(netdev); netdev->netdev_ops = &txgbe_netdev_ops; /* setup the private structure */ - err = txgbe_sw_init(adapter); + err = txgbe_sw_init(wx); if (err) goto err_free_mac_table; /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST); + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); if (err) goto err_free_mac_table; - err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST); + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST); if (err) goto err_free_mac_table; - err = wx_mng_present(wxhw); + err = wx_mng_present(wx); if (err) { dev_err(&pdev->dev, "Management capability is not present\n"); goto err_free_mac_table; } - err = txgbe_reset_hw(hw); + err = txgbe_reset_hw(wx); if (err) { dev_err(&pdev->dev, "HW Init failed: %d\n", err); goto err_free_mac_table; } netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= NETIF_F_SG; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | NETIF_F_RXALL; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); /* make sure the EEPROM is good */ - err = txgbe_validate_eeprom_checksum(hw, NULL); + err = txgbe_validate_eeprom_checksum(wx, NULL); if (err != 0) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); - wr32(wxhw, WX_MIS_RST, WX_MIS_RST_SW_RST); + wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST); err = -EIO; goto err_free_mac_table; } - eth_hw_addr_set(netdev, wxhw->mac.perm_addr); - txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr); + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); + + err = wx_init_interrupt_scheme(wx); + if (err) + goto err_free_mac_table; /* Save off EEPROM version number and Option Rom version which * together make a unique identifier for the eeprom */ - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
&eeprom_verh); - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, &eeprom_verl); etrack_id = (eeprom_verh << 16) | eeprom_verl; - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, &offset); /* Make sure offset to SCSI block is valid */ if (!(offset == 0x0) && !(offset == 0xffff)) { - wx_read_ee_hostif(wxhw, offset + 0x84, &eeprom_cfg_blkh); - wx_read_ee_hostif(wxhw, offset + 0x83, &eeprom_cfg_blkl); + wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh); + wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl); /* Only display Option ROM version if it exists */ if (eeprom_cfg_blkl && eeprom_cfg_blkh) { @@ -532,15 +649,15 @@ static int txgbe_probe(struct pci_dev *pdev, build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); patch = eeprom_cfg_blkh & 0x00ff; - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x, %d.%d.%d", etrack_id, major, build, patch); } else { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x", etrack_id); } } else { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x", etrack_id); } @@ -548,7 +665,9 @@ static int txgbe_probe(struct pci_dev *pdev, if (err) goto err_release_hw; - pci_set_drvdata(pdev, adapter); + pci_set_drvdata(pdev, wx); + + netif_tx_stop_all_queues(netdev);
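/* A worked example of the version-ID math just above, with values chosen
 * purely for illustration: eeprom_verh = 0x0001 and eeprom_verl = 0x0203
 * give etrack_id 0x00010203, the value formatted into wx->eeprom_id.
 * txgbe_make_etrack_id() is a hypothetical helper, not part of this patch.
 */
static u32 txgbe_make_etrack_id(u16 verh, u16 verl)
{
	/* the high EEPROM word forms the upper half of the 32-bit ID */
	return ((u32)verh << 16) | verl;
}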
/* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough @@ -556,27 +675,28 @@ * parts to ensure that no warning is displayed, as this could confuse * users otherwise. */ - expected_gts = txgbe_enumerate_functions(adapter) * 10; + expected_gts = txgbe_enumerate_functions(wx) * 10; /* don't check link if we failed to enumerate functions */ if (expected_gts > 0) - txgbe_check_minimum_link(adapter); + txgbe_check_minimum_link(wx); else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); /* First try to read PBA as a string */ - err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH); + err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); if (err) strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr); + netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); return 0; err_release_hw: - wx_control_hw(wxhw, false); + wx_clear_interrupt_scheme(wx); + wx_control_hw(wx, false); err_free_mac_table: - kfree(adapter->mac_table); + kfree(wx->mac_table); err_pci_release_regions: pci_disable_pcie_error_reporting(pdev); pci_release_selected_regions(pdev, @@ -597,16 +717,17 @@ err_pci_disable_dev: **/ static void txgbe_remove(struct pci_dev *pdev) { - struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); struct net_device *netdev; - netdev = adapter->netdev; + netdev = wx->netdev; unregister_netdev(netdev); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(adapter->mac_table); + kfree(wx->mac_table); + wx_clear_interrupt_scheme(wx); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 740a1c447e20..563ea51deca6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -67,8 +67,37 @@ #define TXGBE_PBANUM1_PTR 0x06 #define TXGBE_PBANUM_PTR_GUARD 0xFAFA -struct txgbe_hw { - struct wx_hw wxhw; -}; +#define TXGBE_MAX_MSIX_VECTORS 64 +#define TXGBE_MAX_FDIR_INDICES 63 + +#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 +#define TXGBE_SP_RAR_ENTRIES 128 +#define TXGBE_SP_MC_TBL_SIZE 128 +#define TXGBE_SP_RX_PB_SIZE 512 +#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ + +/* TX/RX descriptor defines */ +#define TXGBE_DEFAULT_TXD 512 +#define TXGBE_DEFAULT_TX_WORK 256 + +#if (PAGE_SIZE < 8192) +#define TXGBE_DEFAULT_RXD 512 +#define TXGBE_DEFAULT_RX_WORK 256 +#else +#define TXGBE_DEFAULT_RXD 256 +#define TXGBE_DEFAULT_RX_WORK 128 +#endif + +#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) + +#define TXGBE_MAX_EITR GENMASK(11, 3) + +extern char txgbe_driver_name[]; #endif /* _TXGBE_TYPE_H_ */
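A note on the interrupt-vector macros added to txgbe_type.h above: queue vectors occupy the low bits of the interrupt bitmap and the miscellaneous (link/other) vector sits one bit above them, which is what lets txgbe_irq_enable() unmask the two groups independently. A self-contained sketch of that bit layout, with the vector count chosen purely for illustration:

#include <assert.h>

int main(void)
{
	unsigned int num_q_vectors = 4;				/* example value */
	unsigned long long misc = 1ULL << num_q_vectors;	/* TXGBE_INTR_MISC */
	unsigned long long qall = misc - 1;			/* TXGBE_INTR_QALL */

	/* misc vector is bit 4; the four queue vectors are bits 0-3 */
	assert(misc == 0x10 && qall == 0xf);
	return 0;
}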