author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-11-13 17:54:08 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-11-13 17:54:08 -0800
commit    | 5cf52037042d3ad7432df1aec004a935e83939a6 (patch)
tree      | f66190e4a21171d08417bc9815df75dd75c49dff /drivers
parent    | 971ad4e4d6833d5f250d0db332ff863c599ae19f (diff)
parent    | 19ca9fc1445b76b60d34148f7ff837b055f5dcf3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) sunhme driver lacks DMA mapping error checks, based upon a report by
Meelis Roos (see the first sketch after this list).
2) Fix memory leak in mvpp2 driver, from Sudip Mukherjee.
3) DMA memory allocation sizes are wrong in systemport ethernet driver,
fix from Florian Fainelli.
4) Fix use after free in mac80211 defragmentation code, from Johannes
Berg.
5) Some networking uapi headers missing from Kbuild file, from Stephen
Hemminger.
6) TUN driver gets csum_start offset wrong when VLAN accel is enabled,
and macvtap has a similar bug, from Herbert Xu.
7) Adjust several tunneling drivers to set dev->iflink after
registration, because registration resets it to -1, overwriting whatever
we set earlier (see the second sketch after this list). From Steffen
Klassert.
8) Geneve forgets to set inner tunneling type, causing GSO segmentation
to fail on some NICs. From Jesse Gross.
9) Fix several locking bugs in stmmac driver, from Fabrice Gasnier and
Giuseppe CAVALLARO.
10) Fix spurious timeouts with NewReno on low traffic connections, from
Marcelo Leitner.
11) Fix descriptor updates in enic driver, from Govindarajulu
Varadarajan.
12) PPP calls bpf_prog_create() with locks held, which isn't kosher.
Fix from Takashi Iwai (see the third sketch after this list).
13) Fix NULL deref in SCTP with malformed INIT packets, from Daniel
Borkmann.
14) psock_fanout selftest accesses past the end of the mmap ring, fix
from Shuah Khan.
15) Fix PTP timestamping for VLAN packets, from Richard Cochran.
16) netlink_unbind() calls in netlink pass wrong initial argument, from
Hiroaki SHIMODA.
17) vxlan socket reuse accidentally reuses a socket when the address
family is different, so we have to explicitly check this (see the fourth
sketch after this list), from Marcelo Leitner.
18) Fix missing include in nft_reject_bridge.c breaking the build on ppc
and other architectures, from Guenter Roeck.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
vxlan: Do not reuse sockets for a different address family
smsc911x: power-up phydev before doing a software reset.
lib: rhashtable - Remove weird non-ASCII characters from comments
net/smsc911x: Fix delays in the PHY enable/disable routines
net/smsc911x: Fix rare soft reset timeout issue due to PHY power-down mode
netlink: Properly unbind in error conditions.
net: ptp: fix time stamp matching logic for VLAN packets.
cxgb4 : dcb open-lldp interop fixes
selftests/net: psock_fanout seg faults in sock_fanout_read_ring()
net: bcmgenet: apply MII configuration in bcmgenet_open()
net: bcmgenet: connect and disconnect from the PHY state machine
net: qualcomm: Fix dependency
ixgbe: phy: fix uninitialized status in ixgbe_setup_phy_link_tnx
net: phy: Correctly handle MII ioctl which changes autonegotiation.
ipv6: fix IPV6_PKTINFO with v4 mapped
net: sctp: fix memory leak in auth key management
net: sctp: fix NULL pointer dereference in af->from_addr_param on malformed packet
net: ppp: Don't call bpf_prog_create() in ppp_lock
net/mlx4_en: Advertize encapsulation offloads features only when VXLAN tunnel is set
cxgb4 : Fix bug in DCB app deletion
...
Diffstat (limited to 'drivers')
48 files changed, 653 insertions, 266 deletions
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 63ea1941e973..7ba83ffb08ac 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) +{ + if (!ioread32(p->ring_csr_addr + CLKEN_ADDR)) + return false; + + if (ioread32(p->ring_csr_addr + SRST_ADDR)) + return false; + + return true; +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { u32 val; + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); @@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata) val |= SCAN_AUTO_INCR; MGMT_CLOCK_SEL_SET(&val, 1); xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + + return 0; } static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 38558584080e..ec45f3256f0e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -104,6 +104,9 @@ enum xgene_enet_rm { #define BLOCK_ETH_MAC_OFFSET 0x0000 #define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 +#define CLKEN_ADDR 0xc208 +#define SRST_ADDR 0xc200 + #define MAC_ADDR_REG_OFFSET 0x00 #define MAC_COMMAND_REG_OFFSET 0x04 #define MAC_WRITE_REG_OFFSET 0x08 @@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); extern struct xgene_mac_ops xgene_gmac_ops; extern struct xgene_port_ops xgene_gport_ops; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3c208cc6f6bb..123669696184 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_desc_ring *buf_pool = NULL; - u8 cpu_bufnum = 0, eth_bufnum = 0; - u8 bp_bufnum = 0x20; - u16 ring_id, ring_num = 0; + u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM; + u8 bp_bufnum = START_BP_BUFNUM; + u16 ring_id, ring_num = START_RING_NUM; int ret; /* allocate rx descriptor ring */ @@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) u16 dst_ring_num; int ret; - pdata->port_ops->reset(pdata); + ret = pdata->port_ops->reset(pdata); + if (ret) + return ret; ret = xgene_enet_create_desc_rings(ndev); if (ret) { @@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev) return ret; err: + unregister_netdev(ndev); free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 874e5a01161f..f9958fae6ffd 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -38,6 +38,9 @@ #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - 
NET_IP_ALIGN) #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 +#define START_ETH_BUFNUM 2 +#define START_BP_BUFNUM 0x22 +#define START_RING_NUM 8 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -83,7 +86,7 @@ struct xgene_mac_ops { }; struct xgene_port_ops { - void (*reset)(struct xgene_enet_pdata *pdata); + int (*reset)(struct xgene_enet_pdata *pdata); void (*cle_bypass)(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id); void (*shutdown)(struct xgene_enet_pdata *pdata); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index c22f32622fa9..f5d4f68c288c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) xgene_sgmac_rxtx(p, TX_EN, false); } -static void xgene_enet_reset(struct xgene_enet_pdata *p) +static int xgene_enet_reset(struct xgene_enet_pdata *p) { + if (!xgene_ring_mgr_init(p)) + return -ENODEV; + clk_prepare_enable(p->clk); clk_disable_unprepare(p->clk); clk_prepare_enable(p->clk); xgene_enet_ecc_init(p); xgene_enet_config_ring_if_assoc(p); + + return 0; } static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 67d07206b3c7..a18a9d1f1143 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); xgene_enet_ecc_init(pdata); xgene_enet_config_ring_if_assoc(pdata); + + return 0; } static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 3a6778a667f4..531bb7c57531 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* We just need one DMA descriptor which is DMA-able, since writing to * the port will allocate a new descriptor in its internal linked-list */ - p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, + GFP_KERNEL); if (!p) { netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); return -ENOMEM; @@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, if (!(reg & TDMA_DISABLED)) netdev_warn(priv->netdev, "TDMA not stopped!\n"); + /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could + * fail, so by checking this pointer we know whether the TX ring was + * fully initialized or not. 
+ */ + if (!ring->cbs) + return; + napi_disable(&ring->napi); netif_napi_del(&ring->napi); @@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, ring->cbs = NULL; if (ring->desc_dma) { - dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma); + dma_free_coherent(kdev, sizeof(struct dma_desc), + ring->desc_cpu, ring->desc_dma); ring->desc_dma = 0; } ring->size = 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fdc9ec09e453..da1a2500c91c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } + /* Re-configure the port multiplexer towards the PHY device */ + bcmgenet_mii_config(priv->dev, false); + + phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, + priv->phy_interface); + bcmgenet_netif_start(dev); return 0; @@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev) bcmgenet_netif_stop(dev); + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(priv->phydev); + /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); + bcmgenet_mii_config(priv->dev, false); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbf524ea3b19..31b2da5f9b82 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); +void bcmgenet_mii_setup(struct net_device *dev); /* Wake-on-LAN routines */ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9ff799a9f801..933cd7e7cd33 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ -static void bcmgenet_mii_setup(struct net_device *dev) +void bcmgenet_mii_setup(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); } -int bcmgenet_mii_config(struct net_device *dev) +int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev) return -EINVAL; } - dev_info(kdev, "configuring instance for %s\n", phy_name); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); return 0; } @@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct 
net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. */ - ret = bcmgenet_mii_config(dev); + ret = bcmgenet_mii_config(dev, true); if (ret) { phy_disconnect(priv->phydev); return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6fe300e316c3..cca604994003 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev) app.protocol = dcb->app_priority[i].protocolid; if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.priority = dcb->app_priority[i].user_prio_map; app.selector = dcb->app_priority[i].sel_field + 1; - err = dcb_ieee_setapp(dev, &app); + err = dcb_ieee_delapp(dev, &app); } else { app.selector = !!(dcb->app_priority[i].sel_field); err = dcb_setapp(dev, &app); @@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, case CXGB4_DCB_INPUT_FW_ENABLED: { /* we're going to use Firmware DCB */ dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; - dcb->supported = CXGB4_DCBX_FW_SUPPORT; + dcb->supported = DCB_CAP_DCBX_LLD_MANAGED; + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + else + dcb->supported |= DCB_CAP_DCBX_VER_CEE; break; } @@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc, *up_tc_map = (1 << tc); /* prio_type is link strict */ - *prio_type = 0x2; + if (*pgid != 0xF) + *prio_type = 0x2; } static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 1); } @@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 0); } static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, @@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, struct fw_port_cmd pcmd; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = pi->adapter; + int fw_tc = 7 - tc; u32 _pgid; int err; @@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, } _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); - _pgid &= ~(0xF << (tc * 4)); - _pgid |= pgid << (tc * 4); + _pgid &= ~(0xF << (fw_tc * 4)); + _pgid |= pgid << (fw_tc * 4); pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); @@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else - *pfccfg = (pi->dcb.pfcen >> priority) & 1; + *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1; } /* Enable/disable Priority Pause Frames for the specified Traffic Class @@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; if (pfccfg) - pcmd.u.dcb.pfc.pfcen |= (1 << priority); + pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority)); else - pcmd.u.dcb.pfc.pfcen &= (~(1 << priority)); + pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority))); err = t4_wr_mbox(adap, adap->mbox, &pcmd, 
sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 5e1b314e11af..39f2b13e66c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_conm_ctrl; + u32 sge_control, sge_control2, sge_conm_ctrl; + unsigned int ingpadboundary, ingpackboundary; int ret, egress_threshold; /* @@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap) sge_control = t4_read_reg(adap, SGE_CONTROL); s->pktshift = PKTSHIFT_GET(sge_control); s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; - s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + - X_INGPADBOUNDARY_SHIFT); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adap->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } if (adap->flags & USING_SOFT_PARAMS) ret = t4_sge_init_soft(adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a9d9d74e4f09..163a2a14948c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF6(sge_hps) | HOSTPAGESIZEPF7(sge_hps)); - t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY_MASK | - EGRSTATUSPAGESIZE_MASK, - INGPADBOUNDARY(fl_align_log - 5) | - EGRSTATUSPAGESIZE(stat_len != 64)); - + if (is_t4(adap->params.chip)) { + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(fl_align_log - 5) | + EGRSTATUSPAGESIZE(stat_len != 64)); + } else { + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + * + * T5 has a different interpretation of the "0" value for the + * Packing Boundary. 
This corresponds to 16 bytes instead of + * the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes. + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | + EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(fl_align_log - + INGPACKBOUNDARY_SHIFT_X)); + } /* * Adjust various SGE Free List Host Buffer Sizes. * diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a1024db5dc13..8d2de1006b08 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -95,6 +95,7 @@ #define X_INGPADBOUNDARY_SHIFT 5 #define SGE_CONTROL 0x1008 +#define SGE_CONTROL2_A 0x1124 #define DCASYSTYPE 0x00080000U #define RXPKTCPLMODE_MASK 0x00040000U #define RXPKTCPLMODE_SHIFT 18 @@ -106,6 +107,7 @@ #define PKTSHIFT_SHIFT 10 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_32B_X 0 #define INGPCIEBOUNDARY_MASK 0x00000380U #define INGPCIEBOUNDARY_SHIFT 7 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) @@ -114,6 +116,14 @@ #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ >> INGPADBOUNDARY_SHIFT) +#define INGPACKBOUNDARY_16B_X 0 +#define INGPACKBOUNDARY_SHIFT_X 5 + +#define INGPACKBOUNDARY_S 16 +#define INGPACKBOUNDARY_M 0x7U +#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) +#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ + & INGPACKBOUNDARY_M) #define EGRPCIEBOUNDARY_MASK 0x0000000eU #define EGRPCIEBOUNDARY_SHIFT 1 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 68eaa9c88c7d..3d06e77d7121 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -299,6 +299,14 @@ struct sge { u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + /* Decoded Adapter Parameters. + */ + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ + /* * Reverse maps from Absolute Queue IDs to associated queue pointers. * The absolute Queue IDs are in a compact range which start at a diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 85036e6b42c4..fdd078d7d82c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -51,14 +51,6 @@ #include "../cxgb4/t4_msg.h" /* - * Decoded Adapter Parameters. - */ -static u32 FL_PG_ORDER; /* large page allocation size */ -static u32 STAT_LEN; /* length of status page at ring end */ -static u32 PKTSHIFT; /* padding between CPL and packet data */ -static u32 FL_ALIGN; /* response queue message alignment */ - -/* * Constants ... 
*/ enum { @@ -102,12 +94,6 @@ enum { MAX_TIMER_TX_RECLAIM = 100, /* - * An FL with <= FL_STARVE_THRES buffers is starving and a periodic - * timer will attempt to refill it. - */ - FL_STARVE_THRES = 4, - - /* * Suspend an Ethernet TX queue with fewer available descriptors than * this. We always want to have room for a maximum sized packet: * inline immediate data + MAX_SKB_FRAGS. This is the same as @@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) /** * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter * @fl: the Free List * * Tests specified Free List to see whether the number of buffers * available to the hardware has falled below our "starvation" * threshold. */ -static inline bool fl_starving(const struct sge_fl *fl) +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) { - return fl->avail - fl->pend_cred <= FL_STARVE_THRES; + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; } /** @@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter, /** * get_buf_size - return the size of an RX Free List buffer. + * @adapter: pointer to the associated adapter * @sdesc: pointer to the software buffer descriptor */ -static inline int get_buf_size(const struct rx_sw_desc *sdesc) +static inline int get_buf_size(const struct adapter *adapter, + const struct rx_sw_desc *sdesc) { - return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) - ? (PAGE_SIZE << FL_PG_ORDER) - : PAGE_SIZE; + const struct sge *s = &adapter->sge; + + return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); } /** @@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); put_page(sdesc->page); sdesc->page = NULL; if (++fl->cidx == fl->size) @@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); sdesc->page = NULL; if (++fl->cidx == fl->size) fl->cidx = 0; @@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz) static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, int n, gfp_t gfp) { + struct sge *s = &adapter->sge; struct page *page; dma_addr_t dma_addr; unsigned int cred = fl->avail; @@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, * If we don't support large pages, drop directly into the small page * allocation code. 
*/ - if (FL_PG_ORDER == 0) + if (s->fl_pg_order == 0) goto alloc_small_pages; while (n) { page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - FL_PG_ORDER); + s->fl_pg_order); if (unlikely(!page)) { /* * We've failed inour attempt to allocate a "large @@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, fl->large_alloc_failed++; break; } - poison_buf(page, PAGE_SIZE << FL_PG_ORDER); + poison_buf(page, PAGE_SIZE << s->fl_pg_order); dma_addr = dma_map_page(adapter->pdev_dev, page, 0, - PAGE_SIZE << FL_PG_ORDER, + PAGE_SIZE << s->fl_pg_order, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { /* @@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, * because DMA mapping resources are typically * critical resources once they become scarse. */ - __free_pages(page, FL_PG_ORDER); + __free_pages(page, s->fl_pg_order); goto out; } dma_addr |= RX_LARGE_BUF; @@ -693,7 +689,7 @@ out: fl->pend_cred += cred; ring_fl_db(adapter, fl); - if (unlikely(fl_starving(fl))) { + if (unlikely(fl_starving(adapter, fl))) { smp_wmb(); set_bit(fl->cntxt_id, adapter->sge.starving_fl); } @@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl) static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, const struct cpl_rx_pkt *pkt) { + struct adapter *adapter = rxq->rspq.adapter; + struct sge *s = &adapter->sge; int ret; struct sk_buff *skb; @@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, return; } - copy_frags(skb, gl, PKTSHIFT); - skb->len = gl->tot_len - PKTSHIFT; + copy_frags(skb, gl, s->pktshift); + skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, bool csum_ok = pkt->csum_calc && !pkt->err_vec && (rspq->netdev->features & NETIF_F_RXCSUM); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; /* * If this is a good TCP packet and we have Generic Receive Offload @@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, rxq->stats.rx_drops++; return 0; } - __skb_pull(skb, PKTSHIFT); + __skb_pull(skb, s->pktshift); skb->protocol = eth_type_trans(skb, rspq->netdev); skb_record_rx_queue(skb, rspq->idx); rxq->stats.pkts++; @@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq) static int process_responses(struct sge_rspq *rspq, int budget) { struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; int budget_left = budget; while (likely(budget_left)) { @@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) BUG_ON(frag >= MAX_SKB_FRAGS); BUG_ON(rxq->fl.avail == 0); sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; - bufsz = get_buf_size(sdesc); + bufsz = get_buf_size(adapter, sdesc); fp->page = sdesc->page; fp->offset = rspq->offset; fp->size = min(bufsz, len); @@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) */ ret = rspq->handler(rspq, rspq->cur_desc, &gl); if (likely(ret == 0)) - rspq->offset += ALIGN(fp->size, FL_ALIGN); + rspq->offset += ALIGN(fp->size, s->fl_align); else restore_rx_bufs(&gl, &rxq->fl, frag); } else if (likely(rsp_type == RSP_TYPE_CPL)) { @@ -1963,7 +1965,7 @@ 
static void sge_rx_timer_cb(unsigned long data) * schedule napi but the FL is no longer starving. * No biggie. */ - if (fl_starving(fl)) { + if (fl_starving(adapter, fl)) { struct sge_eth_rxq *rxq; rxq = container_of(fl, struct sge_eth_rxq, fl); @@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd) { + struct sge *s = &adapter->sge; struct port_info *pi = netdev_priv(dev); struct fw_iq_cmd cmd, rpl; int ret, iqandst, flsz = 0; @@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->size = roundup(fl->size, FL_PER_EQ_UNIT); fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), - &fl->addr, &fl->sdesc, STAT_LEN); + &fl->addr, &fl->sdesc, s->stat_len); if (!fl->desc) { ret = -ENOMEM; goto err; @@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * free list ring) in Egress Queue Units. */ flsz = (fl->size / FL_PER_EQ_UNIT + - STAT_LEN / EQ_UNIT); + s->stat_len / EQ_UNIT); /* * Fill in all the relevant firmware Ingress Queue Command @@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *devq, unsigned int iqid) { + struct sge *s = &adapter->sge; int ret, nentries; struct fw_eq_eth_cmd cmd, rpl; struct port_info *pi = netdev_priv(dev); @@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * Calculate the size of the hardware TX Queue (including the Status * Page on the end of the TX Queue) in units of TX Descriptors. */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); /* * Allocate the hardware ring for the TX ring (with space for its @@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); if (!txq->q.desc) return -ENOMEM; @@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, */ static void free_txq(struct adapter *adapter, struct sge_txq *tq) { + struct sge *s = &adapter->sge; + dma_free_coherent(adapter->pdev_dev, - tq->size * sizeof(*tq->desc) + STAT_LEN, + tq->size * sizeof(*tq->desc) + s->stat_len, tq->desc, tq->phys_addr); tq->cntxt_id = 0; tq->sdesc = NULL; @@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq) static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl) { + struct sge *s = &adapter->sge; unsigned int flid = fl ? 
fl->cntxt_id : 0xffff; t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, @@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, if (fl) { free_rx_bufs(adapter, fl, fl->avail); dma_free_coherent(adapter->pdev_dev, - fl->size * sizeof(*fl->desc) + STAT_LEN, + fl->size * sizeof(*fl->desc) + s->stat_len, fl->desc, fl->addr); kfree(fl->sdesc); fl->sdesc = NULL; @@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; + unsigned int ingpadboundary, ingpackboundary; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter) * Now translate the adapter parameters into our internal forms. */ if (fl1) - FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; - STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) - ? 128 : 64); - PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); - FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + - SGE_INGPADBOUNDARY_SHIFT); + s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) + ? 128 : 64); + s->pktshift = PKTSHIFT_GET(sge_params->sge_control); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adapter->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + /* A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + s->fl_starve_thres + = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; /* * Set up tasklet timers. diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 95df61dcb4ce..4b6a6d14d86d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -134,11 +134,13 @@ struct dev_params { */ struct sge_params { u32 sge_control; /* padding, boundaries, lengths, etc. 
*/ + u32 sge_control2; /* T5: more of the same */ u32 sge_host_page_size; /* RDMA page sizes */ u32 sge_queues_per_page; /* RDMA queues/page */ u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_congestion_control; /* congestion thresholds, etc. */ u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ u32 sge_timer_value_2_and_3; u32 sge_timer_value_4_and_5; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e984fdc48ba2..1e896b923234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter) sge_params->sge_timer_value_2_and_3 = vals[5]; sge_params->sge_timer_value_4_and_5 = vals[6]; + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately with the Padding Boundary in SGE_CONTROL and and Packing + * Boundary in SGE_CONTROL2. So for T5 and later we need to grab + * SGE_CONTROL in order to determine how ingress packet data will be + * laid out in Packed Buffer Mode. Unfortunately, older versions of + * the firmware won't let us retrieve SGE_CONTROL2 so if we get a + * failure grabbing it we throw an error since we can't figure out the + * right value. + */ + if (!is_t4(adapter->params.chip)) { + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter->pdev_dev, + "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_control2 = vals[0]; + } + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); - v = t4vf_query_params(adapter, 1, params, vals); + params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); + v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; sge_params->sge_ingress_rx_threshold = vals[0]; + sge_params->sge_congestion_control = vals[1]; return 0; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 180e53fa628f..73cf1653a4a3 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) struct vnic_rq_buf *buf = rq->to_use; if (buf->os_buf) { - buf = buf->next; - rq->to_use = buf; - rq->ring.desc_avail--; - if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { - /* Adding write memory barrier prevents compiler and/or - * CPU reordering, thus avoiding descriptor posting - * before descriptor is initialized. Otherwise, hardware - * can read stale descriptor fields. 
- */ - wmb(); - iowrite32(buf->index, &rq->ctrl->posted_index); - } + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); return 0; } @@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, enic->rq_truncated_pkts++; } + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; return; } @@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Buffer overflow */ + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; } } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 50a851db2852..3dca494797bd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len) return bufaddr; } +static void swap_buffer2(void *dst_buf, void *src_buf, int len) +{ + int i; + unsigned int *src = src_buf; + unsigned int *dst = dst_buf; + + for (i = 0; i < len; i += 4, src++, dst++) + *dst = swab32p(src); +} + static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff } static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length) + struct bufdesc *bdp, u32 length, bool swap) { struct fec_enet_private *fep = netdev_priv(ndev); struct sk_buff *new_skb; @@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); - memcpy(new_skb->data, (*skb)->data, length); + if (!swap) + memcpy(new_skb->data, (*skb)->data, length); + else + swap_buffer2(new_skb->data, (*skb)->data, length); *skb = new_skb; return true; @@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) u16 vlan_tag; int index = 0; bool is_copybreak; + bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME; #ifdef CONFIG_M532x flush_cache_all(); @@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) * include that when passing upstream as it messes up * bridging applications. 
*/ - is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, + need_swap); if (!is_copybreak) { skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (unlikely(!skb_new)) { @@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, pkt_len - 4); data = skb->data; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (!is_copybreak && need_swap) swap_buffer(data, pkt_len); /* Extract the enhanced buffer descriptor */ @@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_device_detach(ndev); netif_tx_unlock_bh(ndev); fec_stop(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); } rtnl_unlock(); - fec_enet_clk_enable(ndev, false); - pinctrl_pm_select_sleep_state(&fep->pdev->dev); - if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev) return ret; } - pinctrl_pm_select_default_state(&fep->pdev->dev); - ret = fec_enet_clk_enable(ndev, true); - if (ret) - goto failed_clk; - rtnl_lock(); if (netif_running(ndev)) { + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) { + rtnl_unlock(); + goto failed_clk; + } fec_restart(ndev); netif_tx_lock_bh(ndev); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index d47b19f27c35..28b81ae09b5a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { - s32 status; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; @@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); - - return status; + return 0; } /** diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index b151a949f352..d44560d1d268 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) int tx_index; struct tx_desc *desc; u32 cmd_sts; - struct sk_buff *skb; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; @@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) reclaimed++; txq->tx_desc_count--; - skb = NULL; - if (cmd_sts & TX_LAST_DESC) - skb = __skb_dequeue(&txq->tx_skb); + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, + desc->byte_cnt, DMA_TO_DEVICE); + + if (cmd_sts & TX_ENABLE_INTERRUPT) { + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); + + if (!WARN_ON(!skb)) + dev_kfree_skb(skb); + } if (cmd_sts & ERROR_SUMMARY) { netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, - desc->byte_cnt, DMA_TO_DEVICE); - dev_kfree_skb(skb); } __netif_tx_unlock_bh(nq); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ece83f101526..fdf3e382e464 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ 
b/drivers/net/ethernet/marvell/mvpp2.c @@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, { struct mvpp2_prs_entry *pe; int tid_aux, tid; + int ret = 0; pe = mvpp2_prs_vlan_find(priv, tpid, ai); @@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, break; } - if (tid <= tid_aux) - return -EINVAL; + if (tid <= tid_aux) { + ret = -EINVAL; + goto error; + } memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* Get first free double vlan ai number */ @@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned int port_map) { struct mvpp2_prs_entry *pe; - int tid_aux, tid, ai; + int tid_aux, tid, ai, ret = 0; pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); @@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; + if (ai < 0) { + ret = ai; + goto error; + } /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; @@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, break; } - if (tid >= tid_aux) - return -ERANGE; + if (tid >= tid_aux) { + ret = -ERANGE; + goto error; + } memset(pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* IPv4 header parsing for fragmentation and L4 offset */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f3032fec8fce..02266e3de514 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); out: - if (ret) + if (ret) { en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + return; + } + + /* set offloads */ + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) int ret; struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); + /* unset offloads */ + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) dev->priv_flags |= IFF_UNICAST_FLT; - if (mdev->dev->caps.tunnel_offload_mode == 
MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL; - } - mdev->pndev[port] = dev; netif_carrier_off(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index a278238a2db6..ad2c96a02a53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); eq->eqn = out.eq_number; + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, eq->name, eq); if (err) goto err_eq; - eq->irqn = vecidx; - eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = mlx5_debug_eq_add(dev, eq); if (err) goto err_irq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3d8e8e489b2d..71b10b210792 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev, dev->profile = &profile[prof_sel]; dev->event = mlx5_core_event; + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); err = mlx5_dev_init(dev, pdev); if (err) { dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); goto out; } - INIT_LIST_HEAD(&priv->ctx_list); - spin_lock_init(&priv->ctx_lock); err = mlx5_register_device(dev); if (err) { dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 0b2a1ccd276d..613037584d08 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work) if (test_bit(__NX_RESETTING, &adapter->state)) goto reschedule; - if (test_bit(__NX_DEV_UP, &adapter->state)) { + if (test_bit(__NX_DEV_UP, &adapter->state) && + !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { if (!adapter->has_link_events) { netxen_nic_handle_phy_intr(adapter); diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index f3a47147937d..9a49f42ac2ba 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -5,7 +5,6 @@ config NET_VENDOR_QUALCOMM bool "Qualcomm devices" default y - depends on SPI_MASTER && OF_GPIO ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM config QCA7000 tristate "Qualcomm Atheros QCA7000 support" - depends on SPI_MASTER && OF_GPIO + depends on SPI_MASTER && OF ---help--- This SPI protocol driver supports the Qualcomm Atheros QCA7000. 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 002d4cdc319f..a77f05ce8325 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx) EFX_MAX_CHANNELS, resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); - BUG_ON(efx->max_channels == 0); + if (WARN_ON(efx->max_channels == 0)) + return -EIO; nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 2c62208077fe..6cc3cf6f17c8 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res, *ires; + struct resource *res; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; + unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { ret = -ENODEV; goto out_release_io; } - - ndev->irq = ires->start; - - if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) - irq_flags = ires->flags & IRQF_TRIGGER_MASK; + /* + * If this platform does not specify any special irqflags, or if + * the resource supplies a trigger, override the irqflags with + * the trigger flags from the resource. + */ + irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) + irq_flags = irq_resflags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index affb29da353e..77ed74561e5f 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) spin_unlock(&pdata->mac_lock); } +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + /* If the internal PHY is in General Power-Down mode, all, except the + * management interface, is powered-down and stays in that condition as + * long as Phy register bit 0.11 is HIGH. + * + * In that case, clear the bit 0.11, so the PHY powers up and we can + * access to the phy registers. + */ + rc = phy_read(pdata->phy_dev, MII_BMCR); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* If the PHY general power-down bit is not set is not necessary to + * disable the general power down-mode. + */ + if (rc & BMCR_PDOWN) { + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + usleep_range(1000, 1500); + } + + return 0; +} + static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { int rc = 0; @@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) return rc; } - /* - * If energy is detected the PHY is already awake so is not necessary - * to disable the energy detect power-down mode. 
- */ - if ((rc & MII_LAN83C185_EDPWRDOWN) && - !(rc & MII_LAN83C185_ENERGYON)) { + /* Only disable if energy detect mode is already enabled */ + if (rc & MII_LAN83C185_EDPWRDOWN) { /* Disable energy detect mode for this SMSC Transceivers */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc & (~MII_LAN83C185_EDPWRDOWN)); @@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); + /* Allow PHY to wakeup */ + mdelay(2); } return 0; @@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) /* Only enable if energy detect mode is already disabled */ if (!(rc & MII_LAN83C185_EDPWRDOWN)) { - mdelay(100); /* Enable energy detect mode for this SMSC Transceivers */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); @@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); } return 0; } @@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) int ret; /* + * Make sure to power-up the PHY chip before doing a reset, otherwise + * the reset fails. + */ + ret = smsc911x_phy_general_power_up(pdata); + if (ret) { + SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); + return ret; + } + + /* * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that * are initialized in a Energy Detect Power-Down mode that prevents * the MAC chip to be software reseted. So we have to wakeup the PHY diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f77a46c7e2c..18c46bb0f3bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) bool stmmac_eee_init(struct stmmac_priv *priv) { char *phy_bus_name = priv->plat->phy_bus_name; + unsigned long flags; bool ret = false; /* Using PCS we cannot dial with the phy registers at this stage @@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disable own timers. */ + spin_lock_irqsave(&priv->lock, flags); if (priv->eee_active) { pr_debug("stmmac: disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); @@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) tx_lpi_timer); } priv->eee_active = 0; + spin_unlock_irqrestore(&priv->lock, flags); goto out; } /* Activate the EEE and start timers */ + spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; init_timer(&priv->eee_ctrl_timer); @@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* Set HW EEE according to the speed */ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); - pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); - ret = true; + spin_unlock_irqrestore(&priv->lock, flags); + + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); } out: return ret; @@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); + spin_unlock_irqrestore(&priv->lock, flags); + /* At this stage, it could be needed to setup the EEE or adjust some * MAC related HW registers. 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
 	char *phy_bus_name = priv->plat->phy_bus_name;
+	unsigned long flags;
 	bool ret = false;
 
 	/* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 			 * changed).
 			 * In that case the driver disable own timers.
 			 */
+			spin_lock_irqsave(&priv->lock, flags);
 			if (priv->eee_active) {
 				pr_debug("stmmac: disable EEE\n");
 				del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 						     tx_lpi_timer);
 			}
 			priv->eee_active = 0;
+			spin_unlock_irqrestore(&priv->lock, flags);
 			goto out;
 		}
 		/* Activate the EEE and start timers */
+		spin_lock_irqsave(&priv->lock, flags);
 		if (!priv->eee_active) {
 			priv->eee_active = 1;
 			init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		/* Set HW EEE according to the speed */
 		priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
-		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
 		ret = true;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 	}
 out:
 	return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* At this stage, it could be needed to setup the EEE or adjust some
 	 * MAC related HW registers.
 	 */
 	priv->eee_enabled = stmmac_eee_init(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 }
 
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-				  int i)
+				  int i, gfp_t flags)
 {
 	struct sk_buff *skb;
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-				 GFP_KERNEL);
+				 flags);
 	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
 * and allocates the socket buffers. It suppors the chained and ring
 * modes.
 */
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
 			else
 				p = priv->dma_rx + i;
 
-			ret = stmmac_init_rx_buffers(priv, p, i);
+			ret = stmmac_init_rx_buffers(priv, p, i, flags);
 			if (ret)
 				goto err_init_rx_buffers;
 
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	ret = init_dma_desc_rings(dev);
-	if (ret < 0) {
-		pr_err("%s: DMA descriptors initialization failed\n", __func__);
-		return ret;
-	}
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	}
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-	priv->eee_enabled = stmmac_eee_init(priv);
-
-	stmmac_init_tx_coalesce(priv);
-
 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
 		priv->rx_riwt = MAX_DMA_RIWT;
 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
 		goto dma_desc_error;
 	}
 
+	ret = init_dma_desc_rings(dev, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto init_error;
+	}
+
 	ret = stmmac_hw_setup(dev);
 	if (ret < 0) {
 		pr_err("%s: Hw setup failed\n", __func__);
 		goto init_error;
 	}
 
+	stmmac_init_tx_coalesce(priv);
+
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int enh_desc = priv->plat->enh_desc;
 
+	spin_lock(&priv->tx_lock);
+
 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+		spin_unlock(&priv->tx_lock);
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock(&priv->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		stmmac_disable_eee_mode(priv);
 
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 dma_map_err:
+	spin_unlock(&priv->tx_lock);
 	dev_err(priv->device, "Tx dma map failed\n");
 	dev_kfree_skb(skb);
 	priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	spin_lock(&priv->lock);
 	priv->hw->mac->set_filter(priv->hw, dev);
-	spin_unlock(&priv->lock);
 }
 
 /**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
 		stmmac_set_mac(priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		clk_disable_unprepare(priv->stmmac_clk);
+		clk_disable(priv->stmmac_clk);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk prevously disabled */
-		clk_prepare_enable(priv->stmmac_clk);
+		clk_enable(priv->stmmac_clk);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
 
 	netif_device_attach(ndev);
 
+	init_dma_desc_rings(ndev, GFP_ATOMIC);
 	stmmac_hw_setup(ndev);
+	stmmac_init_tx_coalesce(priv);
 
 	napi_enable(&priv->napi);
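Pulling init_dma_desc_rings() out of stmmac_hw_setup() lets each caller pick the allocation context: open() can sleep and passes GFP_KERNEL, while the resume path, which may run where sleeping in the allocator is not acceptable, passes GFP_ATOMIC. A minimal sketch of that shape, with a hypothetical foo_refill_ring() helper standing in for the driver's ring setup:

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: the caller chooses the allocation context. */
static int foo_refill_ring(struct net_device *dev, struct sk_buff **ring,
			   int n, unsigned int buf_sz, gfp_t gfp)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i] = __netdev_alloc_skb(dev, buf_sz, gfp);
		if (!ring[i])
			return -ENOMEM;
	}
	return 0;
}

/*
 * .ndo_open (may sleep):  foo_refill_ring(dev, ring, n, sz, GFP_KERNEL);
 * PM resume (constrained): foo_refill_ring(dev, ring, n, sz, GFP_ATOMIC);
 */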
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 	HMD(("init rxring, "));
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
+		u32 mapping;
 
 		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
 		if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 
 		/* Because we reserve afterwards. */
 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+		mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(hp->dma_dev, mapping)) {
+			dev_kfree_skb_any(skb);
+			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+			continue;
+		}
 		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
 			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-			      dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-					     DMA_FROM_DEVICE));
+			      mapping);
 		skb_reserve(skb, RX_OFFSET);
 	}
 
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 		skb = hp->rx_skbs[elem];
 		if (len > RX_COPY_THRESHOLD) {
 			struct sk_buff *new_skb;
+			u32 mapping;
 
 			/* Now refill the entry, if we can. */
 			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 				drops++;
 				goto drop_it;
 			}
+			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+			mapping = dma_map_single(hp->dma_dev, new_skb->data,
+						 RX_BUF_ALLOC_SIZE,
+						 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+				dev_kfree_skb_any(new_skb);
+				drops++;
+				goto drop_it;
+			}
+
 			dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 			hp->rx_skbs[elem] = new_skb;
-			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 			hme_write_rxd(hp, this,
 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-				      dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-						     DMA_FROM_DEVICE));
+				      mapping);
 			skb_reserve(new_skb, RX_OFFSET);
 
 			/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+				 u32 first_len, u32 first_entry, u32 entry)
+{
+	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+	dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+	first_entry = NEXT_TX(first_entry);
+	while (first_entry != entry) {
+		struct happy_meal_txd *this = &txbase[first_entry];
+		u32 addr, len;
+
+		addr = hme_read_desc32(hp, &this->tx_addr);
+		len = hme_read_desc32(hp, &this->tx_flags);
+		len &= TXFLAG_SIZE;
+		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+	}
+}
+
 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 					 struct net_device *dev)
 {
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
 		len = skb->len;
 		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+			goto out_dma_error;
 		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
 			      (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
 					       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+			goto out_dma_error;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 			len = skb_frag_size(this_frag);
 			mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
 						   0, len, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+				unmap_partial_tx_skb(hp, first_mapping, first_len,
+						     first_entry, entry);
+				goto out_dma_error;
+			}
 			this_txflags = tx_flags;
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
 	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
 	return NETDEV_TX_OK;
+
+out_dma_error:
+	hp->tx_skbs[hp->tx_new] = NULL;
+	spin_unlock_irq(&hp->happy_lock);
+
+	dev_kfree_skb_any(skb);
+	dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
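All of the sunhme hunks enforce the same rule: every address returned by dma_map_single() or skb_frag_dma_map() must be checked with dma_mapping_error() before it reaches the hardware, and a failed fragment map must unwind the mappings already made. The core check in isolation, as a minimal sketch with a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Map an RX buffer; returns 0 or a negative errno. Sketch only. */
static int foo_map_rx(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t m = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, m))
		return -ENOMEM;	/* never hand an unchecked handle to the NIC */

	*out = m;
	return 0;
}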
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 3ae83879a75f..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
 {
 	if (!ale)
 		return -EINVAL;
-
 	cpsw_ale_stop(ale);
 	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
 	kfree(ale);
 	return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 
 	switch (ptp_class & PTP_CLASS_PMASK) {
 	case PTP_CLASS_IPV4:
-		offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
 		break;
 	case PTP_CLASS_IPV6:
 		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
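The cpts fix (and the identical dp83640 fix below) matters for VLAN-tagged PTP frames: before this switch the matching code has already advanced offset past any VLAN tag (context outside the hunk), so the IPv4 header length must be read relative to that shifted base. IPV4_HLEN(data) would parse the tag bytes as the IP header. A self-contained sketch of the corrected offset arithmetic; the function name is hypothetical:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/udp.h>

/* Locate the UDP payload of an IPv4 PTP event frame; sketch only. */
static unsigned int foo_ptp_payload_offset(const u8 *data, bool tagged)
{
	unsigned int offset = tagged ? VLAN_HLEN : 0;
	const struct iphdr *iph;

	/* The IP header sits after the (possibly shifted) Ethernet header. */
	iph = (const struct iphdr *)(data + offset + ETH_HLEN);

	return offset + ETH_HLEN + (iph->ihl << 2) + sizeof(struct udphdr);
}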
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f226de655a4..880cc090dc44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+		if (vlan_tx_tag_present(skb))
+			vnet_hdr->csum_start += VLAN_HLEN;
 		vnet_hdr->csum_offset = skb->csum_offset;
 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052706e8..e22e602beef3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 
 	switch (type & PTP_CLASS_PMASK) {
 	case PTP_CLASS_IPV4:
-		offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
 		break;
 	case PTP_CLASS_IPV6:
 		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
 
 	switch (type & PTP_CLASS_PMASK) {
 	case PTP_CLASS_IPV4:
-		offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
 		break;
 	case PTP_CLASS_IPV6:
 		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc9dfc3..767cd110f496 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *mii_data = if_mii(ifr);
 	u16 val = mii_data->val_in;
+	bool change_autoneg = false;
 
 	switch (cmd) {
 	case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 		if (mii_data->phy_id == phydev->addr) {
 			switch (mii_data->reg_num) {
 			case MII_BMCR:
-				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
+				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
+					if (phydev->autoneg == AUTONEG_ENABLE)
+						change_autoneg = true;
 					phydev->autoneg = AUTONEG_DISABLE;
-				else
+					if (val & BMCR_FULLDPLX)
+						phydev->duplex = DUPLEX_FULL;
+					else
+						phydev->duplex = DUPLEX_HALF;
+					if (val & BMCR_SPEED1000)
+						phydev->speed = SPEED_1000;
+					else if (val & BMCR_SPEED100)
+						phydev->speed = SPEED_100;
+					else phydev->speed = SPEED_10;
+				}
+				else {
+					if (phydev->autoneg == AUTONEG_DISABLE)
+						change_autoneg = true;
 					phydev->autoneg = AUTONEG_ENABLE;
-				if (!phydev->autoneg && (val & BMCR_FULLDPLX))
-					phydev->duplex = DUPLEX_FULL;
-				else
-					phydev->duplex = DUPLEX_HALF;
-				if (!phydev->autoneg && (val & BMCR_SPEED1000))
-					phydev->speed = SPEED_1000;
-				else if (!phydev->autoneg &&
-					 (val & BMCR_SPEED100))
-					phydev->speed = SPEED_100;
+				}
 				break;
 			case MII_ADVERTISE:
-				phydev->advertising = val;
+				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+				change_autoneg = true;
 				break;
 			default:
 				/* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 		if (mii_data->reg_num == MII_BMCR &&
 		    val & BMCR_RESET)
 			return phy_init_hw(phydev);
+
+		if (change_autoneg)
+			return phy_start_aneg(phydev);
+
 		return 0;
 
 	case SIOCSHWTSTAMP:
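The phy.c rewrite makes the SIOCSMIIREG path behave the way a real PHY would: with autonegotiation disabled, speed and duplex come directly from the BMCR bits just written; with it enabled, a MII_ADVERTISE write is converted into phylib's ethtool advertising bitmap and a renegotiation is kicked off. The BMCR decode on its own, as a small sketch (hypothetical helper name):

#include <linux/mii.h>

/* Map BMCR speed/duplex bits to Mb/s and duplex; mirrors the ioctl fix. */
static void foo_decode_bmcr(u16 bmcr, int *speed, int *full_duplex)
{
	*full_duplex = !!(bmcr & BMCR_FULLDPLX);

	if (bmcr & BMCR_SPEED1000)
		*speed = 1000;
	else if (bmcr & BMCR_SPEED100)
		*speed = 100;
	else
		*speed = 10;
}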
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f4e0ab..794a47329368 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		err = get_filter(argp, &code);
 		if (err >= 0) {
+			struct bpf_prog *pass_filter = NULL;
 			struct sock_fprog_kern fprog = {
 				.len = err,
 				.filter = code,
 			};
 
-			ppp_lock(ppp);
-			if (ppp->pass_filter) {
-				bpf_prog_destroy(ppp->pass_filter);
-				ppp->pass_filter = NULL;
+			err = 0;
+			if (fprog.filter)
+				err = bpf_prog_create(&pass_filter, &fprog);
+			if (!err) {
+				ppp_lock(ppp);
+				if (ppp->pass_filter)
+					bpf_prog_destroy(ppp->pass_filter);
+				ppp->pass_filter = pass_filter;
+				ppp_unlock(ppp);
 			}
-			if (fprog.filter != NULL)
-				err = bpf_prog_create(&ppp->pass_filter,
-						      &fprog);
-			else
-				err = 0;
 			kfree(code);
-			ppp_unlock(ppp);
 		}
 		break;
 	}
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		err = get_filter(argp, &code);
 		if (err >= 0) {
+			struct bpf_prog *active_filter = NULL;
 			struct sock_fprog_kern fprog = {
 				.len = err,
 				.filter = code,
 			};
 
-			ppp_lock(ppp);
-			if (ppp->active_filter) {
-				bpf_prog_destroy(ppp->active_filter);
-				ppp->active_filter = NULL;
+			err = 0;
+			if (fprog.filter)
+				err = bpf_prog_create(&active_filter, &fprog);
+			if (!err) {
+				ppp_lock(ppp);
+				if (ppp->active_filter)
+					bpf_prog_destroy(ppp->active_filter);
+				ppp->active_filter = active_filter;
+				ppp_unlock(ppp);
 			}
-			if (fprog.filter != NULL)
-				err = bpf_prog_create(&ppp->active_filter,
-						      &fprog);
-			else
-				err = 0;
 			kfree(code);
-			ppp_unlock(ppp);
 		}
 		break;
 	}
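The ppp_generic change is the classic cure for sleeping in atomic context: bpf_prog_create() allocates with GFP_KERNEL and may sleep, so it must not run under ppp_lock() (a spinlock). Build the new object first, then take the lock only for the pointer swap. The pattern in miniature, with a hypothetical foo object:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo { char data[64]; };

static DEFINE_SPINLOCK(foo_lock);
static struct foo *foo_current;

static int foo_replace(void)
{
	struct foo *new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep: do it unlocked */
	struct foo *old;

	if (!new)
		return -ENOMEM;

	spin_lock(&foo_lock);		/* critical section is now just a swap */
	old = foo_current;
	foo_current = new;
	spin_unlock(&foo_lock);

	kfree(old);
	return 0;
}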
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7302398f0b1f..9dd3746994a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 	struct tun_pi pi = { 0, skb->protocol };
 	ssize_t total = 0;
 	int vlan_offset = 0, copied;
+	int vlan_hlen = 0;
+	int vnet_hdr_sz = 0;
+
+	if (vlan_tx_tag_present(skb))
+		vlan_hlen = VLAN_HLEN;
+
+	if (tun->flags & TUN_VNET_HDR)
+		vnet_hdr_sz = tun->vnet_hdr_sz;
 
 	if (!(tun->flags & TUN_NO_PI)) {
 		if ((len -= sizeof(pi)) < 0)
 			return -EINVAL;
 
-		if (len < skb->len) {
+		if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
 			/* Packet will be striped */
 			pi.flags |= TUN_PKT_STRIP;
 		}
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		total += sizeof(pi);
 	}
 
-	if (tun->flags & TUN_VNET_HDR) {
+	if (vnet_hdr_sz) {
 		struct virtio_net_hdr gso = { 0 }; /* no info leak */
-		if ((len -= tun->vnet_hdr_sz) < 0)
+		if ((len -= vnet_hdr_sz) < 0)
 			return -EINVAL;
 
 		if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			gso.csum_start = skb_checksum_start_offset(skb);
+			gso.csum_start = skb_checksum_start_offset(skb) +
+					 vlan_hlen;
 			gso.csum_offset = skb->csum_offset;
 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
 					       sizeof(gso))))
 			return -EFAULT;
-
-		total += tun->vnet_hdr_sz;
+		total += vnet_hdr_sz;
 	}
 
 	copied = total;
-	total += skb->len;
-	if (!vlan_tx_tag_present(skb)) {
-		len = min_t(int, skb->len, len);
-	} else {
+	len = min_t(int, skb->len + vlan_hlen, len);
+	total += skb->len + vlan_hlen;
+	if (vlan_hlen) {
 		int copy, ret;
 		struct {
 			__be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-		len = min_t(int, skb->len + VLAN_HLEN, len);
-		total += VLAN_HLEN;
 
 		copy = min_t(int, vlan_offset, len);
 		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6cdb12f..816d511e34d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 		return ret;
 	}
 
-	ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
-	if (ret < 0)
-		return ret;
-
-	msleep(150);
-
-	ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
-	if (ret < 0)
-		return ret;
-
-	msleep(150);
-
-	ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+	ax88772_reset(dev);
 
 	/* Read PHYID register *AFTER* the PHY was reset properly */
 	phyid = asix_get_phyid(dev);
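The tun hunks and the macvtap hunk above fix the same off-by-four family: once the VLAN tag is reinserted into the frame handed to userspace, every length that describes that frame (the TUN_PKT_STRIP truncation test, csum_start, the running total) must include VLAN_HLEN. A hedged sketch of the bookkeeping, independent of the tun internals; the foo_* names are hypothetical:

#include <linux/if_vlan.h>

/* Hypothetical layout of a frame copied out with its VLAN tag re-inserted. */
struct foo_frame_layout {
	unsigned int vnet_hdr_sz;	/* virtio-net header, if negotiated */
	unsigned int vlan_hlen;		/* VLAN_HLEN when a tag is present  */
	unsigned int skb_len;		/* payload as stored in the skb     */
};

/* Everything the reader will see; truncation tests must use this. */
static unsigned int foo_wire_len(const struct foo_frame_layout *f)
{
	return f->vnet_hdr_sz + f->vlan_hlen + f->skb_len;
}

/* The tag shifts the L3 headers, so the checksum start shifts with them. */
static unsigned int foo_csum_start(unsigned int skb_csum_start,
				   unsigned int vlan_hlen)
{
	return skb_csum_start + vlan_hlen;
}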
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca309820d39e..fa9dc45b75a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
 	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+					  sa_family_t family, __be16 port)
 {
 	struct vxlan_sock *vs;
 
 	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
-		if (inet_sk(vs->sock->sk)->inet_sport == port)
+		if (inet_sk(vs->sock->sk)->inet_sport == port &&
+		    inet_sk(vs->sock->sk)->sk.sk_family == family)
 			return vs;
 	}
 	return NULL;
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 }
 
 /* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+					sa_family_t family, __be16 port)
 {
 	struct vxlan_sock *vs;
 
-	vs = vxlan_find_sock(net, port);
+	vs = vxlan_find_sock(net, family, port);
 	if (!vs)
 		return NULL;
 
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
 	int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
 	int err = -ENOSYS;
 
+	udp_tunnel_gro_complete(skb, nhoff);
+
 	eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
 	type = eh->h_proto;
 
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			struct vxlan_dev *dst_vxlan;
 
 			ip_rt_put(rt);
-			dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+						   dst->sa.sa_family, dst_port);
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			struct vxlan_dev *dst_vxlan;
 
 			dst_release(ndst);
-			dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+						   dst->sa.sa_family, dst_port);
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_sock *vs;
+	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
 
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
 	spin_lock(&vn->sock_lock);
-	vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+	vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+			     vxlan->dst_port);
 	if (vs) {
 		/* If we have a socket with same port already, reuse it */
 		atomic_inc(&vs->refcnt);
@@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_sock *vs;
+	bool ipv6 = flags & VXLAN_F_IPV6;
 
 	vs = vxlan_socket_create(net, port, rcv, data, flags);
 	if (!IS_ERR(vs))
@@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 		return vs;
 
 	spin_lock(&vn->sock_lock);
-	vs = vxlan_find_sock(net, port);
+	vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
 	if (vs) {
 		if (vs->rcv == rcv)
 			atomic_inc(&vs->refcnt);
@@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
 		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+	if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+			   vxlan->dst_port)) {
 		pr_info("duplicate VNI %u\n", vni);
 		return -EEXIST;
 	}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e0d9f19650b0..eb03943f8463 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (WARN_ON_ONCE(mvm->init_ucode_complete))
+	if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
 		return 0;
 
 	iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 		goto out;
 	}
 
+	mvm->calibrating = true;
+
 	/* Send TX valid antennas before triggering calibrations */
 	ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
 	if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 			MVM_UCODE_CALIB_TIMEOUT);
 	if (!ret)
 		mvm->init_ucode_complete = true;
+
+	if (ret && iwl_mvm_is_radio_killed(mvm)) {
+		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+		ret = 1;
+	}
 	goto out;
 
 error:
 	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
+	mvm->calibrating = false;
 	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
 		/* we want to debug INIT and we have no NVM - fake */
 		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 585fe5b7100f..b62405865b25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 	mvm->ps_disabled = false;
+	mvm->calibrating = false;
 
 	/* just in case one was running */
 	ieee80211_remain_on_channel_expired(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced7015b..845429c88cf4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
 	enum iwl_ucode_type cur_ucode;
 	bool ucode_loaded;
 	bool init_ucode_complete;
+	bool calibrating;
 	u32 error_event_table;
 	u32 log_event_table;
 	u32 umac_error_event_table;
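The vxlan fix works because an AF_INET and an AF_INET6 VXLAN socket may share a UDP port number, so the port alone no longer identifies a socket; the lookup key becomes (netns, family, port). A minimal sketch of such a keyed lookup over a hypothetical list (the foo_* types and names are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/socket.h>
#include <net/net_namespace.h>

struct foo_tun_sock {
	struct list_head list;
	struct net *net;	/* owning network namespace */
	sa_family_t family;	/* AF_INET or AF_INET6 */
	__be16 port;		/* local UDP port, network byte order */
};

static struct foo_tun_sock *foo_find_sock(struct list_head *head,
					  struct net *net,
					  sa_family_t family, __be16 port)
{
	struct foo_tun_sock *s;

	list_for_each_entry(s, head, list)
		if (net_eq(s->net, net) && s->family == family &&
		    s->port == port)
			return s;	/* all three keys must match */
	return NULL;
}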
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 48cb25a93591..5b719ee8e789 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	}
 	mvm->sf_state = SF_UNINIT;
 	mvm->low_latency_agg_frame_limit = 6;
+	mvm->cur_ucode = IWL_UCODE_INIT;
 
 	mutex_init(&mvm->mutex);
 	mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	bool calibrating = ACCESS_ONCE(mvm->calibrating);
 
 	if (state)
 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 
 	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 
-	return state && mvm->cur_ucode != IWL_UCODE_INIT;
+	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
+	if (calibrating)
+		iwl_abort_notification_waits(&mvm->notif_wait);
+
+	/*
+	 * Stop the device if we run OPERATIONAL firmware or if we are in the
+	 * middle of the calibrations.
+	 */
+	return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 3781b029e54a..160c3ebc48d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	 * restart. So don't process again if the device is
 	 * already dead.
	 */
-	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
 		iwl_pcie_tx_stop(trans);
 		iwl_pcie_rx_stop(trans);
 
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	/* clear all status bits */
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	clear_bit(STATUS_INT_ENABLED, &trans->status);
-	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
 	clear_bit(STATUS_TPOWER_PMI, &trans->status);
 	clear_bit(STATUS_RFKILL, &trans->status);
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1ce741..c9ad4cf1adfb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
 	if (err != 0) {
 		printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
 		       err);
-		goto failed_hw;
+		goto failed_bind;
 	}
 
 	skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
 	return idx;
 
 failed_hw:
+	device_release_driver(data->dev);
+failed_bind:
 	device_unregister(data->dev);
failed_drvdata:
 	ieee80211_free_hw(hw);
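The mac80211_hwsim fix illustrates the usual rule for error unwinding: labels must tear down state in exact reverse order of construction, and each failure site jumps to the label that undoes only what has already succeeded (a failed bind must not be "unbound"). A skeleton of the pattern; all foo_* functions are hypothetical stubs:

/* Stubs standing in for the real allocation/registration steps. */
static int foo_alloc(void);
static int foo_register(void);
static int foo_bind(void);
static int foo_start(void);
static void foo_unbind(void);
static void foo_unregister(void);
static void foo_free(void);

static int foo_create(void)
{
	int err;

	err = foo_alloc();
	if (err)
		return err;

	err = foo_register();
	if (err)
		goto failed_alloc;

	err = foo_bind();
	if (err)
		goto failed_register;	/* nothing to unbind yet */

	err = foo_start();
	if (err)
		goto failed_bind;	/* full reverse-order unwind from here */

	return 0;

failed_bind:
	foo_unbind();
failed_register:
	foo_unregister();
failed_alloc:
	foo_free();
	return err;
}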