Diffstat (limited to 'drivers/net/ethernet/broadcom/genet/bcmgenet.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/genet/bcmgenet.c | 382
1 file changed, 223 insertions(+), 159 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6befde61c203..275be56fd324 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -54,8 +54,10 @@
 /* Default highest priority queue for multi queue support */
 #define GENET_Q0_PRIORITY	0
 
-#define GENET_DEFAULT_BD_CNT	\
-	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+#define GENET_Q16_RX_BD_CNT	\
+	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
+#define GENET_Q16_TX_BD_CNT	\
+	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
 
 #define RX_BUF_LENGTH		2048
 #define SKB_ALIGNMENT		32
@@ -923,7 +925,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
 
 	tx_cb_ptr = ring->cbs;
 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
-	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+
 	/* Advancing local write pointer */
 	if (ring->write_ptr == ring->end_ptr)
 		ring->write_ptr = ring->cb_ptr;
@@ -978,39 +980,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
-	unsigned int bds_compl;
 	unsigned int c_index;
+	unsigned int txbds_ready;
+	unsigned int txbds_processed = 0;
 
 	/* Compute how many buffers are transmitted since last xmit call */
 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-	txq = netdev_get_tx_queue(dev, ring->queue);
-
-	last_c_index = ring->c_index;
-	num_tx_bds = ring->size;
+	c_index &= DMA_C_INDEX_MASK;
 
-	c_index &= (num_tx_bds - 1);
-
-	if (c_index >= last_c_index)
-		last_tx_cn = c_index - last_c_index;
+	if (likely(c_index >= ring->c_index))
+		txbds_ready = c_index - ring->c_index;
 	else
-		last_tx_cn = num_tx_bds - last_c_index + c_index;
+		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
 	netif_dbg(priv, tx_done, dev,
-		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-		  __func__, ring->index,
-		  c_index, last_tx_cn, last_c_index);
+		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
 	/* Reclaim transmitted buffers */
-	while (last_tx_cn-- > 0) {
-		tx_cb_ptr = ring->cbs + last_c_index;
-		bds_compl = 0;
+	while (txbds_processed < txbds_ready) {
+		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
 		if (tx_cb_ptr->skb) {
 			pkts_compl++;
-			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1026,20 +1021,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 				       DMA_TO_DEVICE);
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
-		dev->stats.tx_packets++;
-		ring->free_bds += bds_compl;
-		last_c_index++;
-		last_c_index &= (num_tx_bds - 1);
+
+		txbds_processed++;
+		if (likely(ring->clean_ptr < ring->end_ptr))
+			ring->clean_ptr++;
+		else
+			ring->clean_ptr = ring->cb_ptr;
 	}
 
+	ring->free_bds += txbds_processed;
+	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		txq = netdev_get_tx_queue(dev, ring->queue);
 		if (netif_tx_queue_stopped(txq))
 			netif_tx_wake_queue(txq);
 	}
 
-	ring->c_index = c_index;
-
 	return pkts_compl;
 }
@@ -1365,16 +1363,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 	}
 	dma_unmap_addr_set(cb, dma_addr, mapping);
 
-	/* assign packet, prepare descriptor, and advance pointer */
-
-	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
-
-	/* turn on the newly assigned BD for DMA to use */
-	priv->rx_bd_assign_index++;
-	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
-
-	priv->rx_bd_assign_ptr = priv->rx_bds +
-		(priv->rx_bd_assign_index * DMA_DESC_SIZE);
+	dmadesc_set_addr(priv, cb->bd_addr, mapping);
 
 	return 0;
 }
@@ -1383,8 +1372,10 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
  * this could be called from bottom half, or from NAPI polling method.
  */
 static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+				     unsigned int index,
 				     unsigned int budget)
 {
+	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
 	struct net_device *dev = priv->dev;
 	struct enet_cb *cb;
 	struct sk_buff *skb;
@@ -1395,21 +1386,21 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 	unsigned int p_index;
 	unsigned int chksum_ok = 0;
 
-	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
+	p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX);
 	p_index &= DMA_P_INDEX_MASK;
 
-	if (p_index < priv->rx_c_index)
-		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
-			priv->rx_c_index + p_index;
+	if (likely(p_index >= ring->c_index))
+		rxpkttoprocess = p_index - ring->c_index;
 	else
-		rxpkttoprocess = p_index - priv->rx_c_index;
+		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
+				 p_index;
 
 	netif_dbg(priv, rx_status, dev,
 		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
 	       (rxpktprocessed < budget)) {
-		cb = &priv->rx_cbs[priv->rx_read_ptr];
+		cb = &priv->rx_cbs[ring->read_ptr];
 		skb = cb->skb;
 
 		/* We do not have a backing SKB, so we do not have a
@@ -1432,10 +1423,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
 		if (!priv->desc_64b_en) {
 			dma_length_status =
-				dmadesc_get_length_status(priv,
-							  priv->rx_bds +
-							  (priv->rx_read_ptr *
-							   DMA_DESC_SIZE));
+				dmadesc_get_length_status(priv, cb->bd_addr);
 		} else {
 			struct status_64 *status;
@@ -1451,8 +1439,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
 		netif_dbg(priv, rx_status, dev,
 			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
-			  __func__, p_index, priv->rx_c_index,
-			  priv->rx_read_ptr, dma_length_status);
+			  __func__, p_index, ring->c_index,
+			  ring->read_ptr, dma_length_status);
 
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
@@ -1530,25 +1518,31 @@ refill:
 		}
 
 		rxpktprocessed++;
-		priv->rx_read_ptr++;
-		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+		if (likely(ring->read_ptr < ring->end_ptr))
+			ring->read_ptr++;
+		else
+			ring->read_ptr = ring->cb_ptr;
+
+		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
+		bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX);
 	}
 
 	return rxpktprocessed;
 }
 
 /* Assign skb to RX DMA descriptor. */
-static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
+				     struct bcmgenet_rx_ring *ring)
 {
 	struct enet_cb *cb;
 	int ret = 0;
 	int i;
 
-	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
 
 	/* loop here for each buffer needing assign */
-	for (i = 0; i < priv->num_rx_bds; i++) {
-		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+	for (i = 0; i < ring->size; i++) {
+		cb = ring->cbs + i;
 		if (cb->skb)
 			continue;
@@ -1710,17 +1704,14 @@ static int init_umac(struct bcmgenet_priv *priv)
 	return 0;
 }
 
-/* Initialize all house-keeping variables for a TX ring, along
- * with corresponding hardware registers
- */
+/* Initialize a Tx ring along with corresponding hardware registers */
 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 				  unsigned int index, unsigned int size,
-				  unsigned int write_ptr, unsigned int end_ptr)
+				  unsigned int start_ptr, unsigned int end_ptr)
 {
 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	u32 flow_period_val = 0;
-	unsigned int first_bd;
 
 	spin_lock_init(&ring->lock);
 	ring->priv = priv;
@@ -1735,12 +1726,13 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 		ring->int_enable = bcmgenet_tx_ring_int_enable;
 		ring->int_disable = bcmgenet_tx_ring_int_disable;
 	}
-	ring->cbs = priv->tx_cbs + write_ptr;
+	ring->cbs = priv->tx_cbs + start_ptr;
 	ring->size = size;
+	ring->clean_ptr = start_ptr;
 	ring->c_index = 0;
 	ring->free_bds = size;
-	ring->write_ptr = write_ptr;
-	ring->cb_ptr = write_ptr;
+	ring->write_ptr = start_ptr;
+	ring->cb_ptr = start_ptr;
 	ring->end_ptr = end_ptr - 1;
 	ring->prod_index = 0;
@@ -1754,19 +1746,16 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 	/* Disable rate control for now */
 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
 				  TDMA_FLOW_PERIOD);
-	/* Unclassified traffic goes to ring 16 */
 	bcmgenet_tdma_ring_writel(priv, index,
 				  ((size << DMA_RING_SIZE_SHIFT) |
 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 
-	first_bd = write_ptr;
-
 	/* Set start and end address, read and write pointers */
-	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
 				  DMA_START_ADDR);
-	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
 				  TDMA_READ_PTR);
-	bcmgenet_tdma_ring_writel(priv, index, first_bd,
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
 				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
 				  DMA_END_ADDR);
@@ -1785,118 +1774,176 @@ static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
 
 /* Initialize a RDMA ring */
 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
-				 unsigned int index, unsigned int size)
+				 unsigned int index, unsigned int size,
+				 unsigned int start_ptr, unsigned int end_ptr)
 {
+	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
 
-	priv->num_rx_bds = TOTAL_DESC;
-	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
-	priv->rx_bd_assign_ptr = priv->rx_bds;
-	priv->rx_bd_assign_index = 0;
-	priv->rx_c_index = 0;
-	priv->rx_read_ptr = 0;
-	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
-			       GFP_KERNEL);
-	if (!priv->rx_cbs)
-		return -ENOMEM;
+	ring->index = index;
+	ring->cbs = priv->rx_cbs + start_ptr;
+	ring->size = size;
+	ring->c_index = 0;
+	ring->read_ptr = start_ptr;
+	ring->cb_ptr = start_ptr;
+	ring->end_ptr = end_ptr - 1;
 
-	ret = bcmgenet_alloc_rx_buffers(priv);
-	if (ret) {
-		kfree(priv->rx_cbs);
+	ret = bcmgenet_alloc_rx_buffers(priv, ring);
+	if (ret)
 		return ret;
-	}
 
-	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
 	bcmgenet_rdma_ring_writel(priv, index,
 				  ((size << DMA_RING_SIZE_SHIFT) |
 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
-	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
-	bcmgenet_rdma_ring_writel(priv, index,
-				  words_per_bd * size - 1, DMA_END_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
 				  (DMA_FC_THRESH_LO <<
 				   DMA_XOFF_THRESHOLD_SHIFT) |
 				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
-	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+	/* Set start and end address, read and write pointers */
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  DMA_START_ADDR);
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  RDMA_READ_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  RDMA_WRITE_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+				  DMA_END_ADDR);
 
 	return ret;
 }
 
-/* init multi xmit queues, only available for GENET2+
- * the queue is partitioned as follows:
+/* Initialize Tx queues
  *
- * queue 0 - 3 is priority based, each one has 32 descriptors,
+ * Queues 0-3 are priority-based, each one has 32 descriptors,
  * with queue 0 being the highest priority queue.
  *
- * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
- * descriptors: 256 - (number of tx queues * bds per queues) = 128
- * descriptors.
+ * Queue 16 is the default Tx queue with
+ * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
  *
- * The transmit control block pool is then partitioned as following:
- * - tx_cbs[0...127] are for queue 16
- * - tx_ring_cbs[0] points to tx_cbs[128..159]
- * - tx_ring_cbs[1] points to tx_cbs[160..191]
- * - tx_ring_cbs[2] points to tx_cbs[192..223]
- * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ * The transmit control block pool is then partitioned as follows:
+ * - Tx queue 0 uses tx_cbs[0..31]
+ * - Tx queue 1 uses tx_cbs[32..63]
+ * - Tx queue 2 uses tx_cbs[64..95]
+ * - Tx queue 3 uses tx_cbs[96..127]
+ * - Tx queue 16 uses tx_cbs[128..255]
 */
-static void bcmgenet_init_multiq(struct net_device *dev)
+static void bcmgenet_init_tx_queues(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	unsigned int i, dma_enable;
-	u32 reg, dma_ctrl, ring_cfg = 0;
+	u32 i, dma_enable;
+	u32 dma_ctrl, ring_cfg;
 	u32 dma_priority[3] = {0, 0, 0};
 
-	if (!netif_is_multiqueue(dev)) {
-		netdev_warn(dev, "called with non multi queue aware HW\n");
-		return;
-	}
-
 	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
 	dma_enable = dma_ctrl & DMA_EN;
 	dma_ctrl &= ~DMA_EN;
 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
 
+	dma_ctrl = 0;
+	ring_cfg = 0;
+
 	/* Enable strict priority arbiter mode */
 	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
 
+	/* Initialize Tx priority queues */
 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
-		/* first 64 tx_cbs are reserved for default tx queue
-		 * (ring 16)
-		 */
-		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
-				      i * priv->hw_params->bds_cnt,
-				      (i + 1) * priv->hw_params->bds_cnt);
-
-		/* Configure ring as descriptor ring and setup priority */
-		ring_cfg |= 1 << i;
-		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
-
+		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
+				      i * priv->hw_params->tx_bds_per_q,
+				      (i + 1) * priv->hw_params->tx_bds_per_q);
+		ring_cfg |= (1 << i);
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
 		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
 			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
 	}
 
-	/* Set ring 16 priority and program the hardware registers */
+	/* Initialize Tx default queue 16 */
+	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
+			      priv->hw_params->tx_queues *
+			      priv->hw_params->tx_bds_per_q,
+			      TOTAL_DESC);
+	ring_cfg |= (1 << DESC_INDEX);
+	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
 	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
 		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
 		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
+
+	/* Set Tx queue priorities */
 	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
 	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
 	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
 
+	/* Enable Tx queues */
+	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+
+	/* Enable Tx DMA */
+	if (dma_enable)
+		dma_ctrl |= DMA_EN;
+	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+}
+
+/* Initialize Rx queues
+ *
+ * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
+ * used to direct traffic to these queues.
+ *
+ * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ */
+static int bcmgenet_init_rx_queues(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 i;
+	u32 dma_enable;
+	u32 dma_ctrl;
+	u32 ring_cfg;
+	int ret;
+
+	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	dma_enable = dma_ctrl & DMA_EN;
+	dma_ctrl &= ~DMA_EN;
+	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+	dma_ctrl = 0;
+	ring_cfg = 0;
+
+	/* Initialize Rx priority queues */
+	for (i = 0; i < priv->hw_params->rx_queues; i++) {
+		ret = bcmgenet_init_rx_ring(priv, i,
+					    priv->hw_params->rx_bds_per_q,
+					    i * priv->hw_params->rx_bds_per_q,
+					    (i + 1) *
+					    priv->hw_params->rx_bds_per_q);
+		if (ret)
+			return ret;
+
+		ring_cfg |= (1 << i);
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	}
+
+	/* Initialize Rx default queue 16 */
+	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
+				    priv->hw_params->rx_queues *
+				    priv->hw_params->rx_bds_per_q,
+				    TOTAL_DESC);
+	if (ret)
+		return ret;
+
+	ring_cfg |= (1 << DESC_INDEX);
+	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+
 	/* Enable rings */
-	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
-	reg |= ring_cfg;
-	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
 
 	/* Configure ring as descriptor ring and re-enable DMA if enabled */
-	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
-	reg |= dma_ctrl;
 	if (dma_enable)
-		reg |= DMA_EN;
-	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+		dma_ctrl |= DMA_EN;
+	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+	return 0;
 }
 
 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
@@ -1985,19 +2032,36 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
 	int ret;
+	unsigned int i;
+	struct enet_cb *cb;
+
+	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+
+	/* Init rDma */
+	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize common Rx ring structures */
+	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+	priv->num_rx_bds = TOTAL_DESC;
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
+	if (!priv->rx_cbs)
+		return -ENOMEM;
 
-	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+	}
 
-	/* by default, enable ring 16 (descriptor based) */
-	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+	/* Initialize Rx queues */
+	ret = bcmgenet_init_rx_queues(priv->dev);
 	if (ret) {
-		netdev_err(priv->dev, "failed to initialize RX ring\n");
+		netdev_err(priv->dev, "failed to initialize Rx queues\n");
+		bcmgenet_free_rx_buffers(priv);
+		kfree(priv->rx_cbs);
 		return ret;
 	}
 
-	/* init rDma */
-	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-
 	/* Init tDma */
 	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
@@ -2011,14 +2075,13 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 		return -ENOMEM;
 	}
 
-	/* initialize multi xmit queue */
-	bcmgenet_init_multiq(priv->dev);
+	for (i = 0; i < priv->num_tx_bds; i++) {
+		cb = priv->tx_cbs + i;
+		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
+	}
 
-	/* initialize special ring 16 */
-	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
-			      priv->hw_params->tx_queues *
-			      priv->hw_params->bds_cnt,
-			      TOTAL_DESC);
+	/* Initialize Tx queues */
+	bcmgenet_init_tx_queues(priv->dev);
 
 	return 0;
 }
@@ -2030,13 +2093,8 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
 						     struct bcmgenet_priv, napi);
 	unsigned int work_done;
 
-	work_done = bcmgenet_desc_rx(priv, budget);
+	work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget);
 
-	/* Advancing our consumer index*/
-	priv->rx_c_index += work_done;
-	priv->rx_c_index &= DMA_C_INDEX_MASK;
-	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
-				  priv->rx_c_index, RDMA_CONS_INDEX);
 	if (work_done < budget) {
 		napi_complete(napi);
 		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
@@ -2499,8 +2557,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 	[GENET_V1] = {
 		.tx_queues = 0,
+		.tx_bds_per_q = 0,
 		.rx_queues = 0,
-		.bds_cnt = 0,
+		.rx_bds_per_q = 0,
 		.bp_in_en_shift = 16,
 		.bp_in_mask = 0xffff,
 		.hfb_filter_cnt = 16,
@@ -2512,8 +2571,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 	},
 	[GENET_V2] = {
 		.tx_queues = 4,
-		.rx_queues = 4,
-		.bds_cnt = 32,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
 		.bp_in_en_shift = 16,
 		.bp_in_mask = 0xffff,
 		.hfb_filter_cnt = 16,
@@ -2528,8 +2588,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 	},
 	[GENET_V3] = {
 		.tx_queues = 4,
-		.rx_queues = 4,
-		.bds_cnt = 32,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
 		.bp_in_en_shift = 17,
 		.bp_in_mask = 0x1ffff,
 		.hfb_filter_cnt = 48,
@@ -2544,8 +2605,9 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 	},
 	[GENET_V4] = {
 		.tx_queues = 4,
-		.rx_queues = 4,
-		.bds_cnt = 32,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
 		.bp_in_en_shift = 17,
 		.bp_in_mask = 0x1ffff,
 		.hfb_filter_cnt = 48,
@@ -2645,14 +2707,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 #endif
 
 	pr_debug("Configuration for version: %d\n"
-		"TXq: %1d, RXq: %1d, BDs: %1d\n"
+		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
 		"BP << en: %2d, BP msk: 0x%05x\n"
 		"HFB count: %2d, QTAQ msk: 0x%05x\n"
 		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
 		"RDMA: 0x%05x, TDMA: 0x%05x\n"
 		"Words/BD: %d\n",
 		priv->version,
-		params->tx_queues, params->rx_queues, params->bds_cnt,
+		params->tx_queues, params->tx_bds_per_q,
+		params->rx_queues, params->rx_bds_per_q,
 		params->bp_in_en_shift, params->bp_in_mask,
 		params->hfb_filter_cnt, params->qtag_mask,
 		params->tbuf_offset, params->hfb_offset,
@@ -2680,8 +2743,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	struct resource *r;
 	int err = -EIO;
 
-	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
-	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+				 GENET_MAX_MQ_CNT + 1);
 	if (!dev) {
 		dev_err(&pdev->dev, "can't allocate net device\n");
 		return -ENOMEM;
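
Note on the index arithmetic introduced above: both __bcmgenet_tx_reclaim() and bcmgenet_desc_rx() now compare a free-running hardware index (TDMA_CONS_INDEX or RDMA_PROD_INDEX, masked with DMA_C_INDEX_MASK / DMA_P_INDEX_MASK) against the ring's software copy and handle counter wrap-around explicitly, instead of masking with the ring size. The following is a minimal, self-contained sketch of that bookkeeping, not code from the driver; the name ring_delta() and the RING_INDEX_MASK value are invented for the example.

#include <assert.h>
#include <stdio.h>

/* Stand-in for DMA_C_INDEX_MASK / DMA_P_INDEX_MASK: the hardware index is
 * assumed here to be a free-running 16-bit counter.
 */
#define RING_INDEX_MASK	0xffff

/* Number of descriptors the hardware has advanced past the software copy,
 * mirroring the wrap-around handling in __bcmgenet_tx_reclaim() and
 * bcmgenet_desc_rx().
 */
static unsigned int ring_delta(unsigned int hw, unsigned int sw)
{
	if (hw >= sw)
		return hw - sw;
	/* the hardware counter wrapped past the mask */
	return (RING_INDEX_MASK + 1) - sw + hw;
}

int main(void)
{
	assert(ring_delta(105, 100) == 5);	/* no wrap */
	assert(ring_delta(3, 0xfffe) == 5);	/* hardware index wrapped */
	printf("ring_delta OK\n");
	return 0;
}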
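The rewritten comment block in the patch also spells out how the 256 descriptors and control blocks are split between the Tx priority queues and default queue 16. Below is a small sketch of that partitioning arithmetic, assuming the GENET_V2+ parameters quoted in the patch (4 Tx queues, 32 BDs per queue, 256 descriptors total); the EX_* macro names are local to this example, not driver identifiers.

#include <stdio.h>

#define EX_TOTAL_DESC	256	/* total Tx descriptors / control blocks */
#define EX_TX_QUEUES	4	/* priority queues 0-3 on GENET_V2+ */
#define EX_TX_BDS_PER_Q	32	/* descriptors per priority queue */

int main(void)
{
	unsigned int q;

	/* Priority queue q owns tx_cbs[q * 32 .. (q + 1) * 32 - 1] */
	for (q = 0; q < EX_TX_QUEUES; q++)
		printf("Tx queue %u: tx_cbs[%u..%u]\n", q,
		       q * EX_TX_BDS_PER_Q, (q + 1) * EX_TX_BDS_PER_Q - 1);

	/* Default queue 16 gets the remainder:
	 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
	 */
	printf("Tx queue 16: tx_cbs[%u..%u] (%u descriptors)\n",
	       EX_TX_QUEUES * EX_TX_BDS_PER_Q, EX_TOTAL_DESC - 1,
	       EX_TOTAL_DESC - EX_TX_QUEUES * EX_TX_BDS_PER_Q);
	return 0;
}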