Diffstat (limited to 'drivers/net/ethernet/broadcom/bcmsysport.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 170
1 file changed, 121 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 783543ad1fcf..909ad7a0d480 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -456,6 +456,67 @@ static int bcm_sysport_set_wol(struct net_device *dev,
 	return 0;
 }
 
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+	return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	u32 reg;
+
+	/* Base system clock is 125Mhz, DMA timeout is this reference clock
+	 * divided by 1024, which yield roughly 8.192 us, our maximum value has
+	 * to fit in the RING_TIMEOUT_MASK (16 bits).
+	 */
+	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+		reg &= ~(RING_INTR_THRESH_MASK |
+			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+		reg |= ec->tx_max_coalesced_frames;
+		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+			RING_TIMEOUT_SHIFT;
+		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+	}
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+	reg &= ~(RDMA_INTR_THRESH_MASK |
+		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+	reg |= ec->rx_max_coalesced_frames;
+	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+		RDMA_TIMEOUT_SHIFT;
+	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+	return 0;
+}
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
 	dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
-				 struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+					     struct bcm_sysport_cb *cb)
 {
 	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
+	struct sk_buff *skb, *rx_skb;
 	dma_addr_t mapping;
-	int ret;
 
-	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
-	if (!cb->skb) {
+	/* Allocate a new SKB for a new packet */
+	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+	if (!skb) {
+		priv->mib.alloc_rx_buff_failed++;
 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
-		return -ENOMEM;
+		return NULL;
	}
 
-	mapping = dma_map_single(kdev, cb->skb->data,
+	mapping = dma_map_single(kdev, skb->data,
 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
+	if (dma_mapping_error(kdev, mapping)) {
 		priv->mib.rx_dma_failed++;
-		bcm_sysport_free_cb(cb);
+		dev_kfree_skb_any(skb);
 		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
-		return ret;
+		return NULL;
 	}
 
-	dma_unmap_addr_set(cb, dma_addr, mapping);
-	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+	/* Grab the current SKB on the ring */
+	rx_skb = cb->skb;
+	if (likely(rx_skb))
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
-	priv->rx_bd_assign_index++;
-	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
-	priv->rx_bd_assign_ptr = priv->rx_bds +
-		(priv->rx_bd_assign_index * DESC_SIZE);
+	/* Put the new SKB on the ring */
+	cb->skb = skb;
+	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_desc_set_addr(priv, cb->bd_addr, mapping);
 
 	netif_dbg(priv, rx_status, ndev, "RX refill\n");
 
-	return 0;
+	/* Return the current SKB to the caller */
+	return rx_skb;
 }
 
 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 {
 	struct bcm_sysport_cb *cb;
-	int ret = 0;
+	struct sk_buff *skb;
 	unsigned int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
-		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
-		if (cb->skb)
-			continue;
-
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			break;
+		cb = &priv->rx_cbs[i];
+		skb = bcm_sysport_rx_refill(priv, cb);
+		if (skb)
+			dev_kfree_skb(skb);
+		if (!cb->skb)
+			return -ENOMEM;
 	}
 
-	return ret;
+	return 0;
 }
 
 /* Poll the hardware for up to budget packets to process */
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 					unsigned int budget)
 {
-	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
 	unsigned int processed = 0, to_process;
 	struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 	unsigned int p_index;
 	u16 len, status;
 	struct bcm_rsb *rsb;
-	int ret;
 
 	/* Determine how much we should process since last call */
 	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 
 	while ((processed < to_process) && (processed < budget)) {
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
-		skb = cb->skb;
+		skb = bcm_sysport_rx_refill(priv, cb);
 
-		processed++;
-		priv->rx_read_ptr++;
-
-		if (priv->rx_read_ptr == priv->num_rx_bds)
-			priv->rx_read_ptr = 0;
 
 		/* We do not have a backing SKB, so we do not a corresponding
 		 * DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 			netif_err(priv, rx_err, ndev, "out of memory!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			goto refill;
+			goto next;
 		}
 
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
 		/* Extract the Receive Status Block prepended */
 		rsb = (struct bcm_rsb *)skb->data;
 		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
 			  len, status);
 
+		if (unlikely(len > RX_BUF_LENGTH)) {
+			netif_err(priv, rx_status, ndev, "oversized packet\n");
+			ndev->stats.rx_length_errors++;
+			ndev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 				ndev->stats.rx_over_errors++;
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		skb_put(skb, len);
@@ -625,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		ndev->stats.rx_bytes += len;
 
 		napi_gro_receive(&priv->napi, skb);
-refill:
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			priv->mib.alloc_rx_buff_failed++;
+next:
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
 	}
 
 	return processed;
@@ -1269,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
 
 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
 {
+	struct bcm_sysport_cb *cb;
 	u32 reg;
 	int ret;
+	int i;
 
 	/* Initialize SW view of the RX ring */
 	priv->num_rx_bds = NUM_RX_DESC;
 	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
-	priv->rx_bd_assign_ptr = priv->rx_bds;
-	priv->rx_bd_assign_index = 0;
 	priv->rx_c_index = 0;
 	priv->rx_read_ptr = 0;
 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+	}
+
 	ret = bcm_sysport_alloc_rx_bufs(priv);
 	if (ret) {
 		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.get_sset_count		= bcm_sysport_get_sset_count,
 	.get_wol		= bcm_sysport_get_wol,
 	.set_wol		= bcm_sysport_set_wol,
+	.get_coalesce		= bcm_sysport_get_coalesce,
+	.set_coalesce		= bcm_sysport_set_coalesce,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
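
Note on the coalescing hunks: the driver converts between microseconds and hardware timeout ticks, where one tick is the 125 MHz system clock divided by 1024, i.e. 8.192 us. The set path therefore uses DIV_ROUND_UP(usecs * 1000, 8192) and the get path multiplies back by 8192 / 1000. The standalone C sketch below only models that round trip; the 16-bit mask value follows the "(16 bits)" remark in the in-driver comment and the helper names are invented here, none of it is taken from the SYSTEMPORT headers.

/*
 * Sketch of the usecs <-> timeout-tick conversion used by
 * bcm_sysport_{get,set}_coalesce().  SKETCH_TIMEOUT_MASK is an assumed
 * 16-bit field width, not a value from the driver headers.
 */
#include <stdio.h>

#define SKETCH_TIMEOUT_MASK	0xffffu			/* assumed 16-bit timeout field */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* same rounding as the kernel macro */

/* usecs -> hardware ticks, rounding up so a non-zero request never becomes 0 */
static unsigned int usecs_to_ticks(unsigned int usecs)
{
	return DIV_ROUND_UP(usecs * 1000u, 8192u);
}

/* hardware ticks -> usecs, mirroring the get path */
static unsigned int ticks_to_usecs(unsigned int ticks)
{
	return ticks * 8192u / 1000u;
}

int main(void)
{
	unsigned int usecs;

	for (usecs = 1; usecs <= 64; usecs *= 2) {
		unsigned int ticks = usecs_to_ticks(usecs);

		printf("%3u us -> %2u ticks -> reads back as %3u us\n",
		       usecs, ticks, ticks_to_usecs(ticks));
	}

	/* Largest value the set path accepts: (MASK * 8) + 1 us */
	printf("max accepted: %u us (mask 0x%x)\n",
	       SKETCH_TIMEOUT_MASK * 8u + 1u, SKETCH_TIMEOUT_MASK);
	return 0;
}

Because the set path rounds up to whole ticks and the get path truncates, a value read back with ethtool -c can differ slightly from what was written with ethtool -C; that matches the arithmetic in the patch.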
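
Note on the refill rework: the new bcm_sysport_rx_refill() allocates and DMA-maps a replacement SKB first, and only then unmaps the SKB currently attached to the descriptor and hands it back to the caller, so an allocation failure now costs one incoming packet instead of leaving an empty ring slot. The userspace model below captures just that swap; struct ring_cb, refill_swap() and the use of malloc() as a stand-in for SKB allocation plus DMA mapping are inventions of this sketch, not driver code.

/*
 * Minimal model of the "allocate first, then swap" refill strategy.
 * On allocation failure the ring entry keeps its old buffer, so the
 * slot is never left without a backing buffer.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_LEN 2048

struct ring_cb {
	void *buf;	/* stands in for cb->skb and its DMA mapping */
};

/* Returns the previous buffer (for the caller to process/free), or NULL if
 * the replacement allocation failed and the ring entry was left untouched. */
static void *refill_swap(struct ring_cb *cb)
{
	void *new_buf = malloc(BUF_LEN);	/* ~ netdev_alloc_skb() + dma_map_single() */
	void *old_buf;

	if (!new_buf)
		return NULL;			/* keep the old buffer on the ring */

	old_buf = cb->buf;			/* grab the current buffer ... */
	cb->buf = new_buf;			/* ... and publish the new one */
	return old_buf;
}

int main(void)
{
	struct ring_cb cb = { .buf = NULL };
	void *pkt;

	/* Initial fill: nothing on the ring yet, so nothing comes back */
	pkt = refill_swap(&cb);
	printf("initial fill returned %p (expected NULL)\n", pkt);

	/* Steady state: every refill hands back the previous buffer */
	pkt = refill_swap(&cb);
	if (pkt) {
		printf("got previous buffer %p to process\n", pkt);
		free(pkt);
	}

	free(cb.buf);
	return 0;
}

This is also why bcm_sysport_alloc_rx_bufs() now checks cb->skb after the call: during the initial fill a NULL return is normal (the ring was empty), and only a still-empty control block signals -ENOMEM.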
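
Note on the RX poll loop: with the refill moved to the top of the loop in bcm_sysport_desc_rx(), every outcome (delivered, out of memory, oversized, fragmented, error) funnels into the common next: label, where the processed count and rx_read_ptr advance exactly once per descriptor and wrap at the ring size, keeping the software consumer index in step with the hardware producer index. The skeleton below illustrates only that bookkeeping; the ring size, budget values and the fake every-third-packet-is-bad pattern are made up for the sketch.

/*
 * Loop skeleton showing the shared "next" accounting path: drop paths jump
 * straight to the label, delivery falls through, and the read pointer wraps
 * like the hardware ring either way.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RX_BDS 8	/* arbitrary ring size for the sketch */

static unsigned int rx_read_ptr;

/* Stand-in for per-descriptor work; pretend every third packet is bad */
static bool process_descriptor(unsigned int idx)
{
	return (idx % 3) != 0;
}

static unsigned int poll_rx(unsigned int budget, unsigned int to_process)
{
	unsigned int processed = 0;

	while (processed < to_process && processed < budget) {
		if (!process_descriptor(rx_read_ptr))
			goto next;		/* drop path: no early advance */

		/* deliver the packet here (napi_gro_receive() in the driver) */
next:
		processed++;
		rx_read_ptr++;
		if (rx_read_ptr == NUM_RX_BDS)
			rx_read_ptr = 0;	/* wrap like the hardware ring */
	}

	return processed;
}

int main(void)
{
	printf("processed %u descriptors, read ptr now %u\n",
	       poll_rx(16, 12), rx_read_ptr);
	return 0;
}

Dropped packets still consume budget, which is consistent with the old code: the hardware already consumed the descriptor whether or not the packet was delivered.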