Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 81
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 112
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 19
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 82
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 14
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 76
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 25
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2
45 files changed, 528 insertions, 339 deletions
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 639049d7e92d..da5f4397f87c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
} else {
+ /* Omit CRC. */
+ len -= ETH_FCS_LEN;
+
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
if (new_skb) {
skb_put(new_skb, len);
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
new_skb->data,
len);
+ skb_checksum_none_assert(skb);
new_skb->protocol =
eth_type_trans(new_skb, bgmac->net_dev);
netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecac04a3687c..4046f97378c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2760,6 +2760,7 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+ bnx2x_del_all_napi(bp);
/* clear pf_load status, as it was already set */
if (IS_PF(bp))
@@ -3142,7 +3143,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
tsum = ~csum_fold(csum_add((__force __wsum) csum,
csum_partial(t_header, -fix, 0)));
- return bswab16(csum);
+ return bswab16(tsum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9a674b14b403..edfa67adf2f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+ if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1663e0b6b5a0..77ebae0ac64a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6,
&rx_tx_in_reset);
- if (!rx_tx_in_reset) {
+ if ((!rx_tx_in_reset) &&
+ (params->link_flags &
+ PHY_INITIALIZED)) {
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_config_sfi(phy, params);
bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -10422,6 +10424,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_ON:
@@ -10468,6 +10492,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
}
break;
@@ -10532,6 +10578,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ LINK_FLAGS_INT_DISABLED) {
+ bnx2x_link_int_enable(params);
+ params->link_flags &=
+ ~LINK_FLAGS_INT_DISABLED;
+ }
+ }
}
break;
}
@@ -11791,6 +11853,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
@@ -12465,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
+ vars->check_kr2_recovery_cnt = 0;
+ params->link_flags = PHY_INITIALIZED;
/* Driver opens NIG-BRB filters */
bnx2x_set_rx_filter(params, 1);
/* Check if link flap can be avoided */
@@ -12629,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
struct bnx2x *bp = params->bp;
vars->link_up = 0;
vars->phy_flags = 0;
+ params->link_flags &= ~PHY_INITIALIZED;
if (!params->lfa_base)
return bnx2x_link_reset(params, vars, 1);
/*
@@ -13349,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
bnx2x_update_link_attr(params, vars->link_attr_sync);
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
/* Restart AN on leading lane */
bnx2x_warpcore_restart_AN_KR(phy, params);
}
@@ -13377,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
return;
}
+ /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
+ * since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * and recovered many times
+ */
+ if (vars->check_kr2_recovery_cnt > 0) {
+ vars->check_kr2_recovery_cnt--;
+ return;
+ }
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
@@ -13437,7 +13514,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & SPEED_20000))
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d25c7d79787a..56c2aae4e2c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,9 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
- u16 rsrv1;
+ u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED (1<<0)
+#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
};
@@ -341,7 +343,8 @@ struct link_vars {
u32 link_status;
u32 eee_status;
u8 fault_detected;
- u8 rsrv1;
+ u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT 5
u16 periodic_flags;
#define PERIODIC_FLAGS_LINK_EVENT 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 364e37ecbc5c..198f6f1c9ad5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old {
#define UPDATE_QSTAT(s, t) \
do { \
- qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+ + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
} while (0)
#define UPDATE_QSTAT_OLD(f) \
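
The reordered UPDATE_QSTAT macro above computes the low 32-bit word first so that a wrap of the low half can be detected and carried into the high half. A minimal userspace sketch of that carry logic (illustrative names, not the driver's qstats types):

#include <stdint.h>
#include <stdio.h>

/* Accumulate a 32-bit split delta into a 64-bit counter kept as two
 * 32-bit halves, propagating a carry when the low half wraps. */
static void update_qstat(uint32_t *hi, uint32_t *lo,
			 uint32_t old_hi, uint32_t old_lo,
			 uint32_t add_hi, uint32_t add_lo)
{
	*lo = old_lo + add_lo;
	*hi = old_hi + add_hi + ((*lo < old_lo) ? 1 : 0);	/* carry on wrap */
}

int main(void)
{
	uint32_t hi, lo;

	/* old = 0x00000000fffffff0 plus 0x20 -> 0x0000000100000010 */
	update_qstat(&hi, &lo, 0x0, 0xfffffff0u, 0x0, 0x20u);
	printf("0x%08x%08x\n", hi, lo);
	return 0;
}
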
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fdb9b5655414..67d2663b3974 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp)
tg3_ump_link_report(tp);
}
+
+ tp->link_up = netif_carrier_ok(tp->dev);
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
@@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
-static void tg3_carrier_on(struct tg3 *tp)
-{
- netif_carrier_on(tp->dev);
- tp->link_up = true;
-}
-
static void tg3_carrier_off(struct tg3 *tp)
{
netif_carrier_off(tp->dev);
@@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp)
return -EBUSY;
if (netif_running(tp->dev) && tp->link_up) {
- tg3_carrier_off(tp);
+ netif_carrier_off(tp->dev);
tg3_link_report(tp);
}
@@ -4134,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tp->link_config.active_speed = tp->link_config.speed;
tp->link_config.active_duplex = tp->link_config.duplex;
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+ /* With autoneg disabled, 5715 only links up when the
+ * advertisement register has the configured speed
+ * enabled.
+ */
+ tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
+ }
+
bmcr = 0;
switch (tp->link_config.speed) {
default:
@@ -4262,9 +4266,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
if (curr_link_up != tp->link_up) {
if (curr_link_up) {
- tg3_carrier_on(tp);
+ netif_carrier_on(tp->dev);
} else {
- tg3_carrier_off(tp);
+ netif_carrier_off(tp->dev);
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4ce62031f62f..8049268ce0f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_BASE 0
#define VPD_LEN 512
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
- int i, ret;
+ int i, ret, addr;
int ec, sn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+ if (ret < 0)
+ goto out;
+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
goto out;
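
The get_vpd_params() change above first probes where the VPD image actually lives: it reads a few bytes at offset 0x400 and, if the first byte is 0x82 (the PCI VPD large-resource tag for the identifier string), uses that as the base, otherwise it falls back to the legacy base 0. A self-contained sketch of that decision under those assumptions (the helper name is made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define VPD_BASE	0x400	/* VPD offset on newer adapters */
#define VPD_BASE_OLD	0x000	/* legacy offset */
#define VPD_ID_TAG	0x82	/* large-resource "identifier string" tag */

/* Pick the VPD base from the first byte read at offset 0x400. */
static unsigned int pick_vpd_base(uint8_t first_byte)
{
	return (first_byte == VPD_ID_TAG) ? VPD_BASE : VPD_BASE_OLD;
}

int main(void)
{
	printf("0x82 at 0x400 -> base 0x%x\n", pick_vpd_base(0x82));
	printf("0xff at 0x400 -> base 0x%x\n", pick_vpd_base(0xff));
	return 0;
}
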
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 0c37fb2cc867..1df33c799c00 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -108,6 +108,7 @@ config TULIP_DM910X
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on (PCI || EISA)
+ depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
select CRC32
---help---
This is support for the DIGITAL series of PCI/EISA Ethernet cards.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 28ceb8414185..29aff55f2eea 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -349,6 +349,7 @@ struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
+ u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 071aea79d218..3c9b4f12e3e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+static u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
- u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
- SLIPORT_SEMAPHORE_OFFSET_BE;
- pci_read_config_dword(adapter->pdev, reg, &sem);
- *stage = sem & POST_STAGE_MASK;
-
- if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
- return -1;
+ if (BEx_chip(adapter))
+ sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
else
- return 0;
+ pci_read_config_dword(adapter->pdev,
+ SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+ return sem & POST_STAGE_MASK;
}
int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
}
do {
- status = be_POST_stage_get(adapter, &stage);
- if (status) {
- dev_err(dev, "POST error; stage=0x%x\n", stage);
- return -1;
- } else if (stage != POST_STAGE_ARMFW_RDY) {
- if (msleep_interruptible(2000)) {
- dev_err(dev, "Waiting for POST aborted\n");
- return -EINTR;
- }
- timeout += 2;
- } else {
+ stage = be_POST_stage_get(adapter);
+ if (stage == POST_STAGE_ARMFW_RDY)
return 0;
+
+ dev_info(dev, "Waiting for POST, %ds elapsed\n",
+ timeout);
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
}
+ timeout += 2;
} while (timeout < 60);
dev_err(dev, "POST timeout; stage=0x%x\n", stage);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 541d4530d5bf..62dc220695f7 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,8 +32,8 @@
#define MPU_EP_CONTROL 0
/********** MPU semphore: used for SH & BE *************/
-#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
-#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
+#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3860888ac711..08e54f3d288b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
+ if (adapter->csr)
+ pci_iounmap(adapter->pdev, adapter->csr);
if (adapter->db)
pci_iounmap(adapter->pdev, adapter->db);
}
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
SLI_INTF_IF_TYPE_SHIFT;
+ if (BEx_chip(adapter) && be_physfn(adapter)) {
+ adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+ if (adapter->csr == NULL)
+ return -ENOMEM;
+ }
+
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
if (addr == NULL)
goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
+ dev_info(&adapter->pdev->dev,
+ "Waiting for FW to be ready after EEH reset\n");
status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141d..e3f39372ce25 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
- unsigned long flags;
+ unsigned int index;
if (!fep->link) {
/* Link is down or autonegotiation is in progress. */
return NETDEV_TX_BUSY;
}
- spin_lock_irqsave(&fep->hw_lock, flags);
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* This should not happen, since ndev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", ndev->name);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- unsigned int index;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
swap_buffer(bufaddr, skb->len);
/* Save skb pointer */
- fep->tx_skbuff[fep->skb_cur] = skb;
-
- ndev->stats.tx_bytes += skb->len;
- fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = skb;
/* Push the data cache so the CPM does not get stale memory
* data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
}
- /* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
/* If this was the last BD in the ring, start at the beginning again. */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- if (bdp == fep->dirty_tx) {
- fep->tx_full = 1;
+ fep->cur_tx = bdp;
+
+ if (fep->cur_tx == fep->dirty_tx)
netif_stop_queue(ndev);
- }
- fep->cur_tx = bdp;
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
skb_tx_timestamp(skb);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
-
return NETDEV_TX_OK;
}
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
+ int index = 0;
fep = netdev_priv(ndev);
- spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
+ /* get next bdp of dirty_tx */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
- if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+ /* current queue is empty */
+ if (bdp == fep->cur_tx)
break;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
- skb = fep->tx_skbuff[fep->skb_dirty];
+ skb = fep->tx_skbuff[index];
+
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- fep->tx_skbuff[fep->skb_dirty] = NULL;
- fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = NULL;
+
+ fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->tx_full) {
- fep->tx_full = 0;
+ if (fep->dirty_tx != fep->cur_tx) {
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
}
- fep->dirty_tx = bdp;
- spin_unlock(&fep->hw_lock);
+ return;
}
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events, fep->hwp + FEC_IEVENT);
- if (int_events & FEC_ENET_RXF) {
+ if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
ret = IRQ_HANDLED;
/* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
}
}
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(ndev);
- }
-
if (int_events & FEC_ENET_MII) {
ret = IRQ_HANDLED;
complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
int pkts = fec_enet_rx(ndev, budget);
struct fec_enet_private *fep = netdev_priv(ndev);
+ fec_enet_tx(ndev);
+
if (pkts < budget) {
napi_complete(napi);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -939,24 +934,28 @@ static void fec_enet_adjust_link(struct net_device *ndev)
goto spin_unlock;
}
- /* Duplex link change */
if (phy_dev->link) {
- if (fep->full_duplex != phy_dev->duplex) {
- fec_restart(ndev, phy_dev->duplex);
- /* prevent unnecessary second fec_restart() below */
+ if (!fep->link) {
fep->link = phy_dev->link;
status_change = 1;
}
- }
- /* Link on or off change */
- if (phy_dev->link != fep->link) {
- fep->link = phy_dev->link;
- if (phy_dev->link)
+ if (fep->full_duplex != phy_dev->duplex)
+ status_change = 1;
+
+ if (phy_dev->speed != fep->speed) {
+ fep->speed = phy_dev->speed;
+ status_change = 1;
+ }
+
+ /* if any of the above changed restart the FEC */
+ if (status_change)
fec_restart(ndev, phy_dev->duplex);
- else
+ } else {
+ if (fep->link) {
fec_stop(ndev);
- status_change = 1;
+ status_change = 1;
+ }
}
spin_unlock:
@@ -1442,6 +1441,7 @@ fec_enet_close(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
/* Don't know what to do yet. */
+ napi_disable(&fep->napi);
fep->opened = 0;
netif_stop_queue(ndev);
fec_stop(ndev);
@@ -1646,6 +1646,7 @@ static int fec_enet_init(struct net_device *ndev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1658,7 @@ static int fec_enet_init(struct net_device *ndev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
fec_restart(ndev, 0);
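
The fec.c rework above drops the skb_cur/skb_dirty bookkeeping and derives the ring index straight from the descriptor pointer; this works for both the legacy and the extended descriptor layouts because the index is just the pointer difference from the ring base in units of whichever descriptor size is in use. A simplified standalone sketch of that index calculation (struct layouts abbreviated, not the driver's full definitions):

#include <stdio.h>

/* Abbreviated stand-ins for the driver's descriptor types. */
struct bufdesc {
	unsigned short cbd_sc;
	unsigned short cbd_datlen;
	unsigned long  cbd_bufaddr;
};

struct bufdesc_ex {
	struct bufdesc desc;
	unsigned long  cbd_esc;
	unsigned long  cbd_prot;	/* extra fields enlarge the stride */
	unsigned long  cbd_bdu;
};

/* Index of bdp within the ring, for either descriptor flavour. */
static int bd_index(void *bdp, void *base, int bufdesc_ex)
{
	if (bufdesc_ex)
		return (struct bufdesc_ex *)bdp - (struct bufdesc_ex *)base;
	return (struct bufdesc *)bdp - (struct bufdesc *)base;
}

int main(void)
{
	struct bufdesc_ex ring[8];

	printf("index of ring[3] = %d\n", bd_index(&ring[3], ring, 1));
	return 0;
}
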
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c4..eb4372962839 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
};
+#else
+struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#endif
struct bufdesc_ex {
struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
unsigned short res0[4];
};
-#else
-struct bufdesc {
- unsigned short cbd_sc; /* Control and status info */
- unsigned short cbd_datlen; /* Data length */
- unsigned long cbd_bufaddr; /* Buffer address */
-};
-#endif
-
/*
* The following definitions courtesy of commproc.h, which where
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
struct sk_buff *rx_skbuff[RX_RING_SIZE];
- ushort skb_cur;
- ushort skb_dirty;
/* CPM dual port RAM relative addresses */
dma_addr_t bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
- uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
@@ -244,6 +240,7 @@ struct fec_enet_private {
phy_interface_t phy_interface;
int link;
int full_duplex;
+ int speed;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
int bufdesc_ex;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 2c1813737f6d..f91a8f3f9d48 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/mdio.h>
+#include <linux/pm_runtime.h>
#include "e1000.h"
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static int e1000e_ethtool_begin(struct net_device *netdev)
+{
+ return pm_runtime_get_sync(netdev->dev.parent);
+}
+
+static void e1000e_ethtool_complete(struct net_device *netdev)
+{
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
+ .begin = e1000e_ethtool_begin,
+ .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index dff7bff8b8e0..121a865c7fbd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -782,6 +782,59 @@ release:
}
/**
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ * preventing further DMA write requests. Workaround the issue by disabling
+ * the de-assertion of the clock request when in 1Gpbs mode.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+ u32 fextnvm6 = er32(FEXTNVM6);
+ s32 ret_val = 0;
+
+ if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
+ u16 kmrn_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ goto release;
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg &
+ ~E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto release;
+
+ usleep_range(10, 20);
+
+ ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+ ret_val =
+ e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+release:
+ hw->phy.ops.release(hw);
+ } else {
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ /* Work-around I218 hang issue */
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
if (hw->phy.type == e1000_phy_i217) {
- u16 phy_reg;
+ u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index b6d3174d7d2d..8bf4655c2e17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -92,6 +92,8 @@
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a177b8b65c44..948b86ffa4f0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
adapter->idle_check = true;
+ hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
/* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
+ pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
+ pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
- bool runtime)
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
e1000e_reset_interrupt_capability(adapter);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
ew32(WUFC, 0);
}
- *enable_wake = !!wufc;
-
- /* make sure adapter isn't asleep if manageability is enabled */
- if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
- (hw->mac.ops.check_mng_mode(hw)))
- *enable_wake = true;
-
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
*/
e1000e_release_hw_control(adapter);
- pci_disable_device(pdev);
-
- return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
- if (sleep && wake) {
- pci_prepare_to_sleep(pdev);
- return;
- }
-
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
- bool wake)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ pci_clear_master(pdev);
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
- e1000_power_off(pdev, sleep, wake);
+ pci_save_state(pdev);
+ pci_prepare_to_sleep(pdev);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
- } else {
- e1000_power_off(pdev, sleep, wake);
}
+
+ return 0;
}
#ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (aspm_disable_flag)
e1000e_disable_aspm(pdev, aspm_disable_flag);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+ pci_set_master(pdev);
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake, false);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
- return retval;
+ return __e1000_shutdown(pdev, false);
}
static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (e1000e_pm_ready(adapter)) {
- bool wake;
-
- __e1000_shutdown(pdev, &wake, true);
- }
+ if (!e1000e_pm_ready(adapter))
+ return 0;
- return 0;
+ return __e1000_shutdown(pdev, true);
}
static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)
static void e1000_shutdown(struct pci_dev *pdev)
{
- bool wake = false;
-
- __e1000_shutdown(pdev, &wake, false);
-
- if (system_state == SYSTEM_POWER_OFF)
- e1000_complete_shutdown(pdev, false, wake);
+ __e1000_shutdown(pdev, false);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- pci_set_master(pdev);
pdev->state_saved = true;
pci_restore_state(pdev);
+ pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 794fe1497666..a7e6a3e37257 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -42,6 +42,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 84e7e0909def..b64542acfa34 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.type) {
case e1000_phy_i210:
case e1000_phy_m88:
- if (hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID)
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
- else
+ break;
+ default:
ret_val = igb_copper_link_setup_m88(hw);
+ break;
+ }
break;
case e1000_phy_igp_3:
ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d27edbc63923..25151401c2ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,7 +447,7 @@ struct igb_adapter {
#endif
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
- struct igb_i2c_client_list *i2c_clients;
+ struct i2c_client *i2c_client;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0a9b073d0b03..4623502054d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,6 +39,10 @@
#include <linux/pci.h>
#ifdef CONFIG_IGB_HWMON
+struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+};
+
/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
unsigned int i;
int n_attrs;
int rc = 0;
+ struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
if (rc)
goto exit;
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
/* Allocation space for max attributes
* max num sensors * values (loc, temp, max, caution)
*/
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed79a1c53b59..4dbd62968c7a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
-static const struct i2c_board_info i350_sensor_info = {
- I2C_BOARD_INFO("i350bb", 0Xf8),
-};
-
/* igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
*
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
/* If we spanned a buffer we have a huge mess so test for it */
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /* Guarantee this function can be used by verifying buffer sizes */
- BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
- NET_IP_ALIGN +
- IGB_TS_HDR_LEN +
- ETH_FRAME_LEN +
- ETH_FCS_LEN));
-
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
-static DEFINE_SPINLOCK(i2c_clients_lock);
-
-/* igb_get_i2c_client - returns matching client
- * in adapters's client list.
- * @adapter: adapter struct
- * @dev_addr: device address of i2c needed.
- */
-static struct i2c_client *
-igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
-{
- ulong flags;
- struct igb_i2c_client_list *client_list;
- struct i2c_client *client = NULL;
- struct i2c_board_info client_info = {
- I2C_BOARD_INFO("igb", 0x00),
- };
-
- spin_lock_irqsave(&i2c_clients_lock, flags);
- client_list = adapter->i2c_clients;
-
- /* See if we already have an i2c_client */
- while (client_list) {
- if (client_list->client->addr == (dev_addr >> 1)) {
- client = client_list->client;
- goto exit;
- } else {
- client_list = client_list->next;
- }
- }
-
- /* no client_list found, create a new one */
- client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
- if (client_list == NULL)
- goto exit;
-
- /* dev_addr passed to us is left-shifted by 1 bit
- * i2c_new_device call expects it to be flush to the right.
- */
- client_info.addr = dev_addr >> 1;
- client_info.platform_data = adapter;
- client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
- if (client_list->client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
- goto err_no_client;
- }
-
- /* insert new client at head of list */
- client_list->next = adapter->i2c_clients;
- adapter->i2c_clients = client_list;
-
- client = client_list->client;
- goto exit;
-
-err_no_client:
- kfree(client_list);
-exit:
- spin_unlock_irqrestore(&i2c_clients_lock, flags);
- return client;
-}
-
/* igb_read_i2c_byte - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = 0;
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
- struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ struct i2c_client *this_client = adapter->i2c_client;
s32 status;
u16 swfw_mask = E1000_SWFW_PHY0_SM;
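
The igb changes above replace the on-demand client lookup with a single i2c_client created at init time; the board info gives the address as (0xF8 >> 1) because the sensor address arrives in 8-bit (write-address) form while the board info field expects the 7-bit address, as the removed comment in igb_get_i2c_client() also noted. A trivial illustration of the conversion:

#include <stdio.h>

int main(void)
{
	unsigned int addr8 = 0xF8;		/* 8-bit form: 7-bit address << 1 | R/W bit */
	unsigned int addr7 = addr8 >> 1;	/* form expected in i2c_board_info.addr */

	printf("8-bit 0x%02X -> 7-bit 0x%02X\n", addr8, addr7);	/* prints 0xF8 -> 0x7C */
	return 0;
}
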
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 29140502b71a..6562c736a1d8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
/* mii management interface *************************************************/
+static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+{
+ u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+ u32 autoneg_disable = FORCE_LINK_PASS |
+ DISABLE_AUTO_NEG_SPEED_GMII |
+ DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+ DISABLE_AUTO_NEG_FOR_DUPLEX;
+
+ if (mp->phy->autoneg == AUTONEG_ENABLE) {
+ /* enable auto negotiation */
+ pscr &= ~autoneg_disable;
+ goto out_write;
+ }
+
+ pscr |= autoneg_disable;
+
+ if (mp->phy->speed == SPEED_1000) {
+ /* force gigabit, half duplex not supported */
+ pscr |= SET_GMII_SPEED_TO_1000;
+ pscr |= SET_FULL_DUPLEX_MODE;
+ goto out_write;
+ }
+
+ pscr &= ~SET_GMII_SPEED_TO_1000;
+
+ if (mp->phy->speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+ else
+ pscr &= ~SET_MII_SPEED_TO_100;
+
+ if (mp->phy->duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ else
+ pscr &= ~SET_FULL_DUPLEX_MODE;
+
+out_write:
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
struct mv643xx_eth_shared_private *msp = dev_id;
@@ -1499,6 +1538,7 @@ static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int ret;
if (mp->phy == NULL)
return -EINVAL;
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
cmd->advertising &= ~ADVERTISED_1000baseT_Half;
- return phy_ethtool_sset(mp->phy, cmd);
+ ret = phy_ethtool_sset(mp->phy, cmd);
+ if (!ret)
+ mv643xx_adjust_pscr(mp);
+ return ret;
}
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev)
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int ret;
- if (mp->phy != NULL)
- return phy_mii_ioctl(mp->phy, ifr, cmd);
+ if (mp->phy == NULL)
+ return -ENOTSUPP;
- return -EOPNOTSUPP;
+ ret = phy_mii_ioctl(mp->phy, ifr, cmd);
+ if (!ret)
+ mv643xx_adjust_pscr(mp);
+ return ret;
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 7e64033d7de3..0706623cfb96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
- u64 in_param;
+ u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb4d8d99f36d..995d4b6d5c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
- u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
-
- en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
- priv->dev->dev_addr);
- mlx4_unregister_mac(dev, priv->port, mac);
+ u64 mac;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+ mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ priv->dev->dev_addr);
+ mlx4_unregister_mac(dev, priv->port, mac);
+ } else {
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
struct hlist_head *bucket;
- unsigned int mac_hash;
+ unsigned int i;
- mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
- bucket = &priv->mac_hash[mac_hash];
- hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- if (ether_addr_equal_64bits(entry->mac,
- priv->dev->dev_addr)) {
- en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
- priv->port, priv->dev->dev_addr, qpn);
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+ mac = mlx4_en_mac_to_u64(entry->mac);
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unregister_mac(dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
- break;
}
}
+
+ en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
+ priv->port, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
}
}
@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr)
return mac;
}
-static int mlx4_en_set_mac(struct net_device *dev, void *addr)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- queue_work(mdev->workqueue, &priv->mac_task);
- return 0;
-}
-
-static void mlx4_en_do_set_mac(struct work_struct *work)
+static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
- struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- mac_task);
- struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_en_replace_mac(priv, priv->base_qpn,
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
+ return err;
+}
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+
+ mutex_lock(&mdev->state_lock);
+ err = mlx4_en_do_set_mac(priv);
mutex_unlock(&mdev->state_lock);
+
+ return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
}
if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
- queue_work(mdev->workqueue, &priv->mac_task);
+ mlx4_en_do_set_mac(priv);
mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
}
mutex_unlock(&mdev->state_lock);
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
+ if (priv->mdev->dev->caps.comp_pool) {
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
+ if (!priv->dev->rx_cpu_rmap)
+ goto err;
+ }
#endif
return 0;
@@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 50917eb3013e..f6245579962d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ /* turn off device-managed steering capability if not enabled */
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ MLX4_GET(field, outbox->buf,
+ QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ field &= 0x7f;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d180bc46826a..16abde20e1fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
- u64 in_param;
+ u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf883345af88..d738454116a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
static inline void set_param_l(u64 *arg, u32 val)
{
- *((u32 *)arg) = val;
+ *arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}
static inline void set_param_h(u64 *arg, u32 val)
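
The set_param_l() fix above is why this series also initialises the various `u64 in_param = 0;` locals: the helper now only writes the low 32 bits, so the high half must already hold a defined value. A small standalone sketch of the low/high packing (the masked set_param_h here is an illustrative counterpart, not necessarily the header's exact implementation):

#include <stdint.h>
#include <stdio.h>

/* Write only the low 32 bits, preserving the high half. */
static void set_param_l(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (uint64_t)val;
}

/* Write only the high 32 bits, preserving the low half (illustrative). */
static void set_param_h(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0x00000000ffffffffULL) | ((uint64_t)val << 32);
}

int main(void)
{
	uint64_t param = 0;	/* must start defined, hence the "= 0" changes */

	set_param_l(&param, 0x11223344);
	set_param_h(&param, 0xaabbccdd);
	printf("param = 0x%016llx\n", (unsigned long long)param);	/* 0xaabbccdd11223344 */
	return 0;
}
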
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c313d7e943a9..f710b7ce0dcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -509,7 +509,6 @@ struct mlx4_en_priv {
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 602ca9bf78e4..f91719a08cba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
- u64 in_param;
+ u64 in_param = 0;
u64 out_param;
int err;
@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
- u64 in_param;
+ u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
- u64 in_param;
+ u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, index);
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
- u64 param;
+ u64 param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&param, index);
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
- u64 in_param;
+ u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, index);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1ac88637ad9d..00f223acada7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
- u64 in_param;
+ u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 719ead15e491..10c57c86388b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- u64 out_param;
+ u64 out_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- u64 out_param;
+ u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&out_param, port);
@@ -361,7 +361,7 @@ out:
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
- u64 out_param;
+ u64 out_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
@@ -406,7 +406,7 @@ out:
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
- u64 in_param;
+ u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 81e2abe07bbb..e891b058c1be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
- u64 in_param;
+ u64 in_param = 0;
u64 out_param;
int err;
@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
- u64 in_param;
+ u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
@@ -319,7 +319,7 @@ err_out:
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
- u64 param;
+ u64 param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&param, qpn);
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
- u64 in_param;
+ u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 083fb48dc3d7..2995687f1aee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
u8 steer_type_mask = 2;
enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
+ return -EINVAL;
+
qpn = vhcr->in_modifier & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index feda6c00829f..e329fe1f11b7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
- u64 in_param;
+ u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, srqn);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8900398ba103..28fb50a1e9c3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- rtl_tx_performance_tweak(pdev,
- (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
}
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_disable_clock_request(pdev);
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
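[Editor's note] All of the 8168 hunks above add the same MTU guard around rtl_tx_performance_tweak(). A hedged sketch of what is being gated; the helper body below is an assumption written against the generic PCIe capability accessors, not the driver source. The tweak raises the PCIe Max_Read_Request_Size (encoding 0x5 = 4096 bytes), which is only wanted at the standard MTU because the jumbo-frame paths program their own value:

	static void tx_performance_tweak_sketch(struct pci_dev *pdev, u16 force)
	{
		/* replace the Max_Read_Request_Size field of the PCIe Device
		 * Control register with the requested encoding */
		pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
						   PCI_EXP_DEVCTL_READRQ, force);
	}

	static void hw_start_sketch(struct rtl8169_private *tp)
	{
		if (tp->dev->mtu <= ETH_DATA_LEN)	/* standard frames only */
			tx_performance_tweak_sketch(tp->pci_dev,
						    0x5 << MAX_READ_REQUEST_SHIFT);
	}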
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index bf57b3cb16ab..0bc00991d310 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
tx_queue->txd.entries);
}
+ efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_stop_interrupts(efx, true);
@@ -832,6 +833,7 @@ out:
efx_start_interrupts(efx, true);
efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
return rc;
rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
efx_flush_all(efx);
- /* Stop the kernel transmit interface late, so the watchdog
- * timer isn't ticking over the flush */
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
netif_tx_disable(efx->net_dev);
efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
- efx_stop_all(efx);
-
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 50247dfe8f57..d2f790df6dcb 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
*/
- netif_tx_lock(dev);
+ netif_tx_lock_bh(dev);
netif_device_detach(dev);
- netif_tx_unlock(dev);
+ netif_tx_unlock_bh(dev);
}
#endif /* EFX_EFX_H */
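[Editor's note] A hedged sketch of the detach helper after the locking change above. The TX path runs from softirq context and takes the same lock, so a plain netif_tx_lock() from process context could deadlock against a local softirq; taking it with bottom halves disabled also guarantees any in-flight transmit has finished before the device is marked absent:

	static inline void device_detach_sync_sketch(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);		/* excludes softirq TX on this CPU too */
		netif_device_detach(dev);	/* clears the "present" bit the TX path checks */
		netif_tx_unlock_bh(dev);
	}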
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2d756c1d7142..0a90abd2421b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
* Will be %NULL if the buffer slot is currently free.
 * @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
* Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
* @len: Buffer length, in bytes.
* @flags: Flags for buffer and packet state.
*/
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
struct sk_buff *skb;
struct page *page;
} u;
- unsigned int len;
+ u16 page_offset;
+ u16 len;
u16 flags;
};
#define EFX_RX_BUF_PAGE 0x0001
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 0ad790cc473c..eaa8e874a3cb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
return false;
tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+ && tx_queue->write_count - write_count == 1;
}
/* For each entry inserted into the software descriptor ring, create a
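[Editor's note] The extra condition added in the hunk above makes TX push (writing the first descriptor straight into the doorbell register) conditional on exactly one descriptor having been queued since the hardware last saw the ring empty. A hedged restatement of the test with simplified types; EFX_EMPTY_COUNT_VALID is the driver's validity flag on the snapshot:

	static bool may_push_tx_desc_sketch(unsigned int empty_read_count,
					    unsigned int old_write_count,
					    unsigned int new_write_count)
	{
		/* the queue was empty when old_write_count descriptors had been written ... */
		bool was_empty = ((empty_read_count ^ old_write_count)
				  & ~EFX_EMPTY_COUNT_VALID) == 0;

		/* ... and exactly one descriptor has been added since */
		return was_empty && (new_write_count - old_write_count == 1);
	}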
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b4..bb579a6128c8 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
struct efx_rx_buffer *buf)
{
- /* Offset is always within one page, so we don't need to consider
- * the page order.
- */
- return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
- efx->type->rx_buffer_hash_size;
+ return buf->page_offset + efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
+ unsigned int page_offset;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
+ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
+ page_offset += (PAGE_SIZE >> 1);
++count;
goto split;
}
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
}
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+ struct efx_rx_buffer *rx_buf,
+ unsigned int used_len)
{
if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state->dma_addr,
efx_rx_buf_size(efx),
DMA_FROM_DEVICE);
+ } else if (used_len) {
+ dma_sync_single_for_cpu(&efx->pci_dev->dev,
+ rx_buf->dma_addr, used_len,
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
goto out;
}
- /* Release card resources - assumes all RX buffers consumed in-order
- * per RX queue
+ /* Release and/or sync DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue
*/
- efx_unmap_rx_buffer(efx, rx_buf);
+ efx_unmap_rx_buffer(efx, rx_buf, len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
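[Editor's note] The rx.c hunks above stop assuming that an RX buffer is unmapped before the stack looks at it: when the page is being kept for reuse it cannot be unmapped, so the used bytes are synced instead. A minimal sketch of that decision using the generic DMA API directly, with the driver's page-state bookkeeping collapsed into a single flag (names are illustrative):

	static void unmap_or_sync_rx_buf(struct device *dma_dev, dma_addr_t dma_addr,
					 size_t mapped_len, size_t used_len,
					 bool last_user_of_page)
	{
		if (last_user_of_page)
			dma_unmap_page(dma_dev, dma_addr, mapped_len,
				       DMA_FROM_DEVICE);
		else if (used_len)
			/* page stays mapped, but give the CPU a coherent view
			 * of the bytes the NIC actually wrote */
			dma_sync_single_for_cpu(dma_dev, dma_addr, used_len,
						DMA_FROM_DEVICE);
	}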
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7e93df6585e7..75c48558e6fd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
writel(vlan, &priv->host_port_regs->port_vlan);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < priv->data.slaves; i++)
slave_write(priv->slaves + i, vlan, reg);
cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
/* If there is no more tx desc left free then we need to
* tell the kernel to stop sending us tx frames.
*/
- if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 52c05366599a..ae1b77aa199f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If there is no more tx desc left free then we need to
* tell the kernel to stop sending us tx frames.
*/
- if (unlikely(cpdma_check_free_tx_desc(priv->txchan)))
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
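[Editor's note] The cpsw and davinci_emac hunks fix the same inverted test. Assuming cpdma_check_free_tx_desc() reports whether a free TX descriptor is still available (which is what the added "!" implies), the queue must be stopped when it returns false; the original code stalled the queue while descriptors were still free. A hedged sketch of the corrected tail of the transmit handler:

	static netdev_tx_t xmit_tail_sketch(struct net_device *ndev, bool have_free_desc)
	{
		if (unlikely(!have_free_desc))
			netif_stop_queue(ndev);	/* restarted from the TX-completion path */

		return NETDEV_TX_OK;
	}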