Diffstat (limited to 'drivers')
 drivers/net/ethernet/arc/emac_main.c | 53 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce718..5b422be56165 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- pktlen = info & LEN_MASK;
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- skb = rx_buff->skb;
- skb_put(skb, pktlen);
- skb->dev = ndev;
- skb->protocol = eth_type_trans(skb, ndev);
-
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
- /* Prepare the BD for next cycle */
- rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
- EMAC_BUFFER_SIZE);
- if (unlikely(!rx_buff->skb)) {
+ /* Prepare the BD for the next cycle. Call netif_receive_skb()
+ * only if a new skb was allocated and mapped, to avoid holes
+ * in the RX FIFO.
+ */
+ skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "cannot allocate skb\n");
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
- /* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
- /* receive_skb only if new skb was allocated to avoid holes */
- netif_receive_skb(skb);
-
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
- netdev_err(ndev, "cannot dma map\n");
- dev_kfree_skb(rx_buff->skb);
+ netdev_err(ndev, "cannot map dma buffer\n");
+ dev_kfree_skb(skb);
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
+ stats->rx_dropped++;
continue;
}
+
+ /* unmap previously mapped skb */
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ pktlen = info & LEN_MASK;
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+ skb_put(rx_buff->skb, pktlen);
+ rx_buff->skb->dev = ndev;
+ rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+ netif_receive_skb(rx_buff->skb);
+
+ rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
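
The discipline this patch enforces is: allocate and map the replacement buffer first, and only then hand the received skb up the stack; on any failure, return the descriptor to the MAC so the RX ring never develops an unarmed hole. Below is a minimal user-space C sketch of that refill pattern. The descriptor layout, the OWN_HW flag, and all helper names are hypothetical stand-ins for illustration, not the driver's actual API.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4
#define BUF_SIZE  1536
#define OWN_HW    (1u << 31)   /* "FOR_EMAC" analogue: slot owned by MAC */
#define LEN_MASK  0x7FFu       /* low bits carry the received length */

struct rx_desc {               /* hypothetical descriptor */
	unsigned int info;     /* ownership flag + packet length */
	void *buf;
};

static struct rx_desc ring[RING_SIZE];

/*
 * Process one completed slot. The replacement buffer is allocated
 * *before* the old one is delivered; if allocation fails, the slot
 * is re-armed with its old buffer (the packet is dropped) so the
 * ring keeps no holes.
 */
static int rx_refill_one(struct rx_desc *d, void (*deliver)(void *, size_t))
{
	size_t pktlen = d->info & LEN_MASK;
	void *fresh = malloc(BUF_SIZE);

	if (!fresh) {
		d->info = OWN_HW | BUF_SIZE;  /* recycle old buffer, drop pkt */
		return -1;
	}

	deliver(d->buf, pktlen);              /* old buffer now owned by stack */
	d->buf = fresh;
	d->info = OWN_HW | BUF_SIZE;          /* arm the slot for the MAC */
	return 0;
}

static void print_pkt(void *buf, size_t len)
{
	printf("delivered %zu-byte packet\n", len);
	free(buf);
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {     /* pretend 64 bytes landed in each */
		ring[i].buf = malloc(BUF_SIZE);
		ring[i].info = 64;
	}
	for (i = 0; i < RING_SIZE; i++)
		rx_refill_one(&ring[i], print_pkt);
	for (i = 0; i < RING_SIZE; i++)       /* release the re-armed buffers */
		free(ring[i].buf);
	return 0;
}

As in the patch, both failure paths (allocation here, and additionally DMA mapping in the driver) end the same way: the descriptor is handed straight back to the MAC and the packet is accounted as dropped, rather than leaving a hole in the RX ring.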