Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/arm/ep93xx_eth.c        4
-rw-r--r--  drivers/net/bnx2x_main.c           14
-rw-r--r--  drivers/net/cassini.c              12
-rw-r--r--  drivers/net/cxgb3/sge.c             2
-rw-r--r--  drivers/net/e100.c                  2
-rw-r--r--  drivers/net/e1000e/ethtool.c        4
-rw-r--r--  drivers/net/e1000e/netdev.c        13
-rw-r--r--  drivers/net/fec.c                  54
-rw-r--r--  drivers/net/ibmveth.c             219
-rw-r--r--  drivers/net/ibmveth.h               5
-rw-r--r--  drivers/net/iseries_veth.c          4
-rw-r--r--  drivers/net/mlx4/alloc.c            1
-rw-r--r--  drivers/net/mlx4/catas.c            1
-rw-r--r--  drivers/net/mlx4/cmd.c              5
-rw-r--r--  drivers/net/mlx4/cq.c               2
-rw-r--r--  drivers/net/mlx4/eq.c               5
-rw-r--r--  drivers/net/mlx4/fw.c              20
-rw-r--r--  drivers/net/mlx4/fw.h               4
-rw-r--r--  drivers/net/mlx4/icm.c              2
-rw-r--r--  drivers/net/mlx4/icm.h              2
-rw-r--r--  drivers/net/mlx4/intf.c             1
-rw-r--r--  drivers/net/mlx4/main.c             4
-rw-r--r--  drivers/net/mlx4/mcg.c              1
-rw-r--r--  drivers/net/mlx4/mlx4.h             3
-rw-r--r--  drivers/net/mlx4/mr.c              51
-rw-r--r--  drivers/net/mlx4/pd.c               7
-rw-r--r--  drivers/net/mlx4/qp.c               2
-rw-r--r--  drivers/net/mlx4/reset.c            1
-rw-r--r--  drivers/net/mlx4/srq.c              1
-rw-r--r--  drivers/net/pasemi_mac.c            6
-rw-r--r--  drivers/net/ppp_generic.c           6
-rw-r--r--  drivers/net/qla3xxx.c              12
-rw-r--r--  drivers/net/s2io.c                 48
-rw-r--r--  drivers/net/sfc/rx.c                4
-rw-r--r--  drivers/net/sfc/tx.c                7
-rw-r--r--  drivers/net/sky2.c                  5
-rw-r--r--  drivers/net/spider_net.c            4
-rw-r--r--  drivers/net/tc35815.c               4
-rw-r--r--  drivers/net/virtio_net.c          114
-rw-r--r--  drivers/net/wireless/ath5k/base.c   4
40 files changed, 434 insertions(+), 226 deletions(-)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
goto err;
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(d)) {
+ if (dma_mapping_error(NULL, d)) {
free_page((unsigned long)page);
goto err;
}
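Note: this hunk is the first instance of the tree-wide change applied throughout this diff: dma_mapping_error() and pci_dma_mapping_error() now take the device (or PCI device) as their first argument, so the error check can be dispatched through per-device dma_ops instead of a single global test. A minimal sketch of the new calling convention, using a hypothetical helper:

#include <linux/dma-mapping.h>

/* Hypothetical helper showing the post-change pattern. */
static int example_map_rx_buf(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t d = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* The device argument lets dma_mapping_error() ask the bus or
	 * architecture dma_ops whether this address is the error value. */
	if (dma_mapping_error(dev, d))
		return -ENOMEM;

	*out = d;
	return 0;
}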
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..af251a5df844 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* release skb */
- BUG_TRAP(skb);
+ WARN_ON(!skb);
dev_kfree_skb(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
#ifdef BNX2X_STOP_ON_ERROR
- BUG_TRAP(used >= 0);
- BUG_TRAP(used <= fp->bp->tx_ring_size);
- BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
+ WARN_ON(used < 0);
+ WARN_ON(used > fp->bp->tx_ring_size);
+ WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif
return (s16)(fp->bp->tx_ring_size) - used;
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(mapping))) {
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
@@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- BUG_TRAP(ring_prod > i);
+ WARN_ON(ring_prod <= i);
}
fp->rx_bd_prod = ring_prod;
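Note: BUG_TRAP(cond) warned when its condition was false, so each conversion above negates the condition rather than copying it verbatim. A hypothetical check mirroring the hunks:

#include <linux/kernel.h>
#include <linux/types.h>

/* WARN_ON() fires when its argument is true, so the asserted
 * condition from BUG_TRAP() must be inverted. */
static inline void sanity_check_tx(s16 used, s16 ring_size)
{
	WARN_ON(used < 0);		/* was BUG_TRAP(used >= 0)         */
	WARN_ON(used > ring_size);	/* was BUG_TRAP(used <= ring_size) */
}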
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 83768df27806..f1936d51b458 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
+ /*
+ * With the lockless pagecache, cassini buffering scheme gets
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not in use
+ * so we can reuse it.
+ */
if (page_count(page->buffer) > 1)
continue;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
dma_addr_t mapping;
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(mapping)))
+ if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rx->dma_addr)) {
+ if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->buffer_info[i].dma =
pci_map_single(pdev, skb->data, 2048,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+ if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 869544b8c05c..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb:
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(ps_page->dma)) {
+ if (pci_dma_mapping_error(pdev, ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
buffer_info->dma = pci_map_single(pdev, skb->data,
adapter->rx_ps_bsize0,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
skb->data + offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
adapter->tx_dma_failed++;
return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
offset,
size,
PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
+ if (pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)) {
dev_err(&adapter->pdev->dev,
"TX DMA page map failed\n");
adapter->tx_dma_failed++;
@@ -4067,8 +4068,6 @@ static void e1000_netpoll(struct net_device *netdev)
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev);
- e1000_clean_tx_irq(adapter);
-
enable_irq(adapter->pdev->irq);
}
#endif
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 32a4f17d35fc..ecd5c71a7a8a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -2,12 +2,6 @@
* Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
*
- * This version of the driver is specific to the FADS implementation,
- * since the board contains control registers external to the processor
- * for the control of the LevelOne LXT970 transceiver. The MPC860T manual
- * describes connections using the internal parallel port I/O, which
- * is basically all of Port D.
- *
* Right now, I am very wasteful with the buffers. I allocate memory
* pages and then divide them into 2K frame buffers. This way I know I
* have buffers large enough to hold one frame within one buffer descriptor.
@@ -49,17 +43,9 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
- defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
-#else
-#include <asm/8xx_immap.h>
-#include <asm/mpc8xx.h>
-#include "commproc.h"
-#endif
#if defined(CONFIG_FEC2)
#define FEC_MAX_PORTS 2
@@ -67,7 +53,7 @@
#define FEC_MAX_PORTS 1
#endif
-#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
+#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt
#endif
@@ -1235,14 +1221,9 @@ static phy_info_t const * const phy_info[] = {
/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id);
-#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);
#endif
-#endif
#if defined(CONFIG_M5272)
/*
@@ -1795,24 +1776,6 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
panic("Could not allocate FEC IRQ!");
-
-#ifdef CONFIG_RPXCLASSIC
- /* Make Port C, bit 15 an input that causes interrupts.
- */
- immap->im_ioport.iop_pcpar &= ~0x0001;
- immap->im_ioport.iop_pcdir &= ~0x0001;
- immap->im_ioport.iop_pcso &= ~0x0001;
- immap->im_ioport.iop_pcint |= 0x0001;
- cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);
-
- /* Make LEDS reflect Link status.
- */
- *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
-#endif
-#ifdef CONFIG_FADS
- if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
- panic("Could not allocate MII IRQ!");
-#endif
}
static void __inline__ fec_get_mac(struct net_device *dev)
@@ -1821,16 +1784,6 @@ static void __inline__ fec_get_mac(struct net_device *dev)
bd = (bd_t *)__res;
memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);
-
-#ifdef CONFIG_RPXCLASSIC
- /* The Embedded Planet boards have only one MAC address in
- * the EEPROM, but can have two Ethernet ports. For the
- * FEC port, we create another address by setting one of
- * the address bits above something that would have (up to
- * now) been allocated.
- */
- dev->dev_addr[3] |= 0x80;
-#endif
}
static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
@@ -2109,13 +2062,8 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
/* This interrupt occurs when the PHY detects a link change.
*/
#ifdef HAVE_mii_link_interrupt
-#ifdef CONFIG_RPXCLASSIC
-static void
-mii_link_interrupt(void *dev_id)
-#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
-#endif
{
struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -52,7 +53,9 @@
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
+#include <asm/iommu.h>
#include <asm/uaccess.h>
+#include <asm/firmware.h>
#include <linux/seq_file.h>
#include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
+
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
u32 i;
u32 count = pool->size - atomic_read(&pool->available);
u32 buffers_added = 0;
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
mb();
for(i = 0; i < count; ++i) {
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- dma_addr_t dma_addr;
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto failure;
+
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_SUCCESS) {
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
- break;
- } else {
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+ else {
buffers_added++;
adapter->replenish_add_buff_success++;
}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
mb();
atomic_add(buffers_added, &(pool->available));
+ return;
+
+failure:
+ pool->free_map[free_index] = index;
+ pool->skbuff[index] = NULL;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
adapter->replenish_task_cycles++;
- for(i = 0; i < IbmVethNumBufferPools; i++)
+ for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
if(adapter->rx_buff_pool[i].active)
ibmveth_replenish_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
@@ -433,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
int i;
+ struct device *dev = &adapter->vdev->dev;
if(adapter->buffer_list_addr != NULL) {
- if(!dma_mapping_error(adapter->buffer_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->buffer_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->buffer_list_dma = DMA_ERROR_CODE;
}
@@ -446,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->filter_list_addr != NULL) {
- if(!dma_mapping_error(adapter->filter_list_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->filter_list_dma, 4096,
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
adapter->filter_list_dma = DMA_ERROR_CODE;
}
@@ -457,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
if(adapter->rx_queue.queue_addr != NULL) {
- if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
adapter->rx_queue.queue_dma,
adapter->rx_queue.queue_len,
DMA_BIDIRECTIONAL);
@@ -472,6 +486,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
if (adapter->rx_buff_pool[i].active)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
+
+ if (adapter->bounce_buffer != NULL) {
+ if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -508,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
int rc;
union ibmveth_buf_desc rxq_desc;
int i;
+ struct device *dev;
ibmveth_debug_printk("open starting\n");
@@ -536,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
return -ENOMEM;
}
- adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
adapter->rx_queue.queue_addr,
adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
- if((dma_mapping_error(adapter->buffer_list_dma) ) ||
- (dma_mapping_error(adapter->filter_list_dma)) ||
- (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
ibmveth_error_printk("unable to map filter or buffer list pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -607,6 +636,24 @@ static int ibmveth_open(struct net_device *netdev)
return rc;
}
+ adapter->bounce_buffer =
+ kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ ibmveth_error_printk("unable to allocate bounce buffer\n");
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return -ENOMEM;
+ }
+ adapter->bounce_buffer_dma =
+ dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+ netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ ibmveth_error_printk("unable to map bounce buffer\n");
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return -ENOMEM;
+ }
+
ibmveth_debug_printk("initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
@@ -853,10 +900,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_packets = 0;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
+ int used_bounce = 0;
+ unsigned long data_dma_addr;
desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
- desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
+ data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +924,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
- if (dma_mapping_error(desc.fields.address)) {
- ibmveth_error_printk("tx: unable to map xmit buffer\n");
+ if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ ibmveth_error_printk("tx: unable to map xmit buffer\n");
+ skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+ skb->len);
+ desc.fields.address = adapter->bounce_buffer_dma;
tx_map_failed++;
- tx_dropped++;
- goto out;
- }
+ used_bounce = 1;
+ } else
+ desc.fields.address = data_dma_addr;
/* send the frame. Arbitrarily set retrycount to 1024 */
correlator = 0;
@@ -904,8 +957,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
}
- dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
- skb->len, DMA_TO_DEVICE);
+ if (!used_bounce)
+ dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+ skb->len, DMA_TO_DEVICE);
out: spin_lock_irqsave(&adapter->stats_lock, flags);
netdev->stats.tx_dropped += tx_dropped;
@@ -1053,9 +1107,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
struct ibmveth_adapter *adapter = dev->priv;
+ struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int reinit = 0;
- int i, rc;
+ int i;
if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
@@ -1067,23 +1121,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
if (i == IbmVethNumBufferPools)
return -EINVAL;
+ /* Deactivate all the buffer pools so that the next loop can activate
+ only the buffer pools necessary to hold the new MTU */
+ for (i = 0; i < IbmVethNumBufferPools; i++)
+ if (adapter->rx_buff_pool[i].active) {
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ adapter->rx_buff_pool[i].active = 0;
+ }
+
/* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i<IbmVethNumBufferPools; i++) {
- if (!adapter->rx_buff_pool[i].active) {
- adapter->rx_buff_pool[i].active = 1;
- reinit = 1;
- }
+ adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- if (reinit && netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
adapter->pool_config = 1;
ibmveth_close(adapter->netdev);
adapter->pool_config = 0;
dev->mtu = new_mtu;
- if ((rc = ibmveth_open(adapter->netdev)))
- return rc;
- } else
- dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
+ return ibmveth_open(adapter->netdev);
+ }
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
return 0;
}
}
@@ -1098,6 +1163,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
}
#endif
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmveth_adapter *adapter;
+ unsigned long ret;
+ int i;
+ int rxqentries = 1;
+
+ /* netdev inits at probe time along with the structures we need below */
+ if (netdev == NULL)
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+ adapter = netdev_priv(netdev);
+
+ ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+ for (i = 0; i < IbmVethNumBufferPools; i++) {
+ /* add the size of the active receive buffers */
+ if (adapter->rx_buff_pool[i].active)
+ ret +=
+ adapter->rx_buff_pool[i].size *
+ IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+ buff_size);
+ rxqentries += adapter->rx_buff_pool[i].size;
+ }
+ /* add the size of the receive queue entries */
+ ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+ return ret;
+}
+
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
int rc, i;
@@ -1242,6 +1347,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
ibmveth_proc_unregister_adapter(adapter);
free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
return 0;
}
@@ -1402,14 +1509,15 @@ const char * buf, size_t count)
return -EPERM;
}
- pool->active = 0;
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
+ pool->active = 0;
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
}
+ pool->active = 0;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
@@ -1485,6 +1593,7 @@ static struct vio_driver ibmveth_driver = {
.id_table = ibmveth_device_table,
.probe = ibmveth_probe,
.remove = ibmveth_remove,
+ .get_desired_dma = ibmveth_get_desired_dma,
.driver = {
.name = ibmveth_driver_name,
.owner = THIS_MODULE,
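Note: the ibmveth changes above add support for Cooperative Memory Overcommit (CMO), under which an IOMMU mapping may legitimately fail for lack of entitlement. The transmit path therefore falls back to a bounce buffer that is allocated and mapped once in ibmveth_open(). A condensed, hypothetical rendering of that fallback (statistics and error reporting elided):

static dma_addr_t map_or_bounce(struct ibmveth_adapter *adapter,
				struct sk_buff *skb, int *used_bounce)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t addr = dma_map_single(dev, skb->data, skb->len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr)) {
		/* Expected occasionally under CMO: copy the frame into
		 * the bounce buffer mapped at open time. */
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);
		*used_bounce = 1;	/* skip dma_unmap after send */
		return adapter->bounce_buffer_dma;
	}
	*used_bounce = 0;
	return addr;
}

The copy costs a memcpy per failed mapping but guarantees forward progress when entitlement is exhausted.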
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address,
plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
#define IbmVethNumBufferPools 5
+#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MAX_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_BUFF_LIST_SIZE 4096
+#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
@@ -143,6 +146,8 @@ struct ibmveth_adapter {
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
+ void *bounce_buffer;
+ dma_addr_t bounce_buffer_dma;
/* adapter specific stats */
u64 replenish_task_cycles;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
msg->data.addr[0] = dma_map_single(port->dev, skb->data,
skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(msg->data.addr[0]))
+ if (dma_mapping_error(port->dev, msg->data.addr[0]))
goto recycle_and_drop;
msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
dma_address = msg->data.addr[0];
dma_length = msg->data.len[0];
- if (!dma_mapping_error(dma_address))
+ if (!dma_mapping_error(msg->dev, dma_address))
dma_unmap_single(msg->dev, dma_address, dma_length,
DMA_TO_DEVICE);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f9d6b4dca180..096bca54bcf7 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index aa9528779044..f094ee00c416 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 70dff94a8bc6..2845a0560b84 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -67,6 +67,8 @@ enum {
CMD_STAT_BAD_INDEX = 0x0a,
/* FW image corrupted: */
CMD_STAT_BAD_NVMEM = 0x0b,
+ /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+ CMD_STAT_ICM_ERROR = 0x0c,
/* Attempt to modify a QP/EE which is not in the presumed state: */
CMD_STAT_BAD_QP_STATE = 0x10,
/* Bad segment parameters (Address/Size): */
@@ -119,6 +121,7 @@ static int mlx4_status_to_errno(u8 status)
[CMD_STAT_BAD_RES_STATE] = -EBADF,
[CMD_STAT_BAD_INDEX] = -EBADF,
[CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [CMD_STAT_ICM_ERROR] = -ENFILE,
[CMD_STAT_BAD_QP_STATE] = -EINVAL,
[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
[CMD_STAT_REG_BOUND] = -EBUSY,
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 95e87a2f8896..9bb50e3f8974 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a1513f07..8a8b56135a58 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -33,6 +33,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
@@ -525,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
return -ENOMEM;
priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+ if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
__free_page(priv->eq_table.icm_page);
return -ENOMEM;
}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 2b5006b9be67..7e32955da982 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -46,6 +46,10 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
@@ -198,7 +202,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
-#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
@@ -373,12 +377,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- if (dev_cap->bmme_flags & 1)
- mlx4_dbg(dev, "Base MM extensions: yes "
- "(flags %d, rsvd L_Key %08x)\n",
- dev_cap->bmme_flags, dev_cap->reserved_lkey);
- else
- mlx4_dbg(dev, "Base MM extensions: no\n");
+ mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
@@ -737,6 +737,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+ /* Enable QoS support if module parameter set */
+ if (enable_qos)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index a0e046c149b7..decbb5c2ad41 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -98,7 +98,7 @@ struct mlx4_dev_cap {
int cmpt_entry_sz;
int mtt_entry_sz;
int resize_srq;
- u8 bmme_flags;
+ u32 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
int max_gso_sz;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 2a5bef6388fe..baf4bf66062c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index 6c44edf35847..ab56a2f89b65 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 4a6c4d526f1b..0e7eb1038f9f 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d3736013fe9b..1252a919de2e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -158,6 +158,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
+ dev->caps.bmme_flags = dev_cap->bmme_flags;
+ dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index b4b57870ddfd..c83f88ce0736 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index a4023c2dd050..5337e3ac3e78 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -118,6 +118,7 @@ struct mlx4_bitmap {
struct mlx4_buddy {
unsigned long **bits;
+ unsigned int *num_free;
int max_order;
spinlock_t lock;
};
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 03a9abcce524..62071d9c4a55 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -47,7 +47,7 @@ struct mlx4_mpt_entry {
__be32 flags;
__be32 qpn;
__be32 key;
- __be32 pd;
+ __be32 pd_flags;
__be64 start;
__be64 length;
__be32 lkey;
@@ -61,11 +61,15 @@ struct mlx4_mpt_entry {
} __attribute__((packed));
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 26)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
#define MLX4_MTT_FLAG_PRESENT 1
#define MLX4_MPT_STATUS_SW 0xF0
@@ -79,23 +83,26 @@ static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
spin_lock(&buddy->lock);
- for (o = order; o <= buddy->max_order; ++o) {
- m = 1 << (buddy->max_order - o);
- seg = find_first_bit(buddy->bits[o], m);
- if (seg < m)
- goto found;
- }
+ for (o = order; o <= buddy->max_order; ++o)
+ if (buddy->num_free[o]) {
+ m = 1 << (buddy->max_order - o);
+ seg = find_first_bit(buddy->bits[o], m);
+ if (seg < m)
+ goto found;
+ }
spin_unlock(&buddy->lock);
return -1;
found:
clear_bit(seg, buddy->bits[o]);
+ --buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
set_bit(seg ^ 1, buddy->bits[o]);
+ ++buddy->num_free[o];
}
spin_unlock(&buddy->lock);
@@ -113,11 +120,13 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
while (test_bit(seg ^ 1, buddy->bits[order])) {
clear_bit(seg ^ 1, buddy->bits[order]);
+ --buddy->num_free[order];
seg >>= 1;
++order;
}
set_bit(seg, buddy->bits[order]);
+ ++buddy->num_free[order];
spin_unlock(&buddy->lock);
}
@@ -131,7 +140,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL);
- if (!buddy->bits)
+ buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+ GFP_KERNEL);
+ if (!buddy->bits || !buddy->num_free)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
@@ -143,6 +154,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
}
set_bit(0, buddy->bits[buddy->max_order]);
+ buddy->num_free[buddy->max_order] = 1;
return 0;
@@ -150,9 +162,10 @@ err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
kfree(buddy->bits[i]);
+err_out:
kfree(buddy->bits);
+ kfree(buddy->num_free);
-err_out:
return -ENOMEM;
}
@@ -164,6 +177,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->bits[i]);
kfree(buddy->bits);
+ kfree(buddy->num_free);
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
@@ -314,21 +328,30 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
memset(mpt_entry, 0, sizeof *mpt_entry);
- mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
- MLX4_MPT_FLAG_MIO |
+ mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
- mpt_entry->pd = cpu_to_be32(mr->pd);
+ mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->start = cpu_to_be64(mr->iova);
mpt_entry->length = cpu_to_be64(mr->size);
mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_seg = 0;
- } else
+ } else {
mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ }
+
+ if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+ /* fast register MR in free state */
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
+ } else {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+ }
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
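Note: the mlx4_buddy changes above add num_free[], a per-order count of free segments, so the allocation search can skip orders whose bitmaps are empty instead of scanning each one with find_first_bit(). A sketch of the resulting search loop, under the invariant that num_free[o] equals the number of set bits in bits[o]:

	for (o = order; o <= buddy->max_order; ++o) {
		if (!buddy->num_free[o])
			continue;	/* empty order skipped in O(1) */
		m = 1 << (buddy->max_order - o);
		seg = find_first_bit(buddy->bits[o], m);
		if (seg < m)
			goto found;	/* free segment of order o */
	}

Splitting on allocation and merging on free each adjust the counts by one per order touched, which keeps the invariant exact; the counts are only read and written under buddy->lock.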
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 3a93c5f0f7ab..aa616892d09c 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -91,6 +91,13 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
+ if (dev->caps.num_uars <= 128) {
+ mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+ dev->caps.num_uars);
+ mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+ return -ENODEV;
+ }
+
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
max(128, dev->caps.reserved_uars));
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index ee5484c44a18..c49a86044bf7 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e199715fabd0..3951b884c0fb 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index d23f46d692ef..533eb6db24b3 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(dma))) {
+ if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
dev_kfree_skb_irq(info->skb);
break;
}
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
PCI_DMA_TODEVICE);
map_size[0] = skb_headlen(skb);
- if (dma_mapping_error(map[0]))
+ if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
goto out_err_nolock;
for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
map_size[i+1] = frag->size;
- if (dma_mapping_error(map[i+1])) {
+ if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
nfrags = i;
goto out_err_nolock;
}
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 739b3ab7bccc..ddccc074a76a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (file == ppp->owner)
ppp_shutdown_interface(ppp);
}
- if (atomic_read(&file->f_count) <= 2) {
+ if (atomic_long_read(&file->f_count) <= 2) {
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
- atomic_read(&file->f_count));
+ printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
unlock_kernel();
return err;
}
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
qdev->lrg_buffer_len -
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
*/
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
sizeof(struct oal),
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
QL_HEADER_SPACE,
PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(map);
+ err = pci_dma_mapping_error(qdev->pdev, map);
if(err) {
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
* Return Value:
* SUCCESS on success or an appropriate -ve value on failure.
*/
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+ int from_card_up)
{
struct sk_buff *skb;
struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp1->Buffer0_ptr = pci_map_single
(ring->pdev, skb->data, size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
(ring->pdev, skb->data, ring->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer2_ptr))
goto pci_map_failed;
if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error
- (rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
}
}
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
{
- if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
}
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
return 0;
pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < config->rx_ring_num; i++) {
ring = &mac_control->rings[i];
ring_pkts_processed = rx_intr_handler(ring, budget);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(nic, ring);
pkts_processed += ring_pkts_processed;
budget -= ring_pkts_processed;
if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+ if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+ -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
netif_rx_schedule(dev, &ring->napi);
} else {
rx_intr_handler(ring, 0);
- s2io_chk_rx_buffers(ring);
+ s2io_chk_rx_buffers(sp, ring);
}
return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
*/
if (!config->napi) {
for (i = 0; i < config->rx_ring_num; i++)
- s2io_chk_rx_buffers(&mac_control->rings[i]);
+ s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single( sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
goto memalloc_failed;
rxdp->Host_Control = (unsigned long) (*skb);
}
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+ if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
goto memalloc_failed;
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer0_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer1_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
for (i = 0; i < config->rx_ring_num; i++) {
mac_control->rings[i].mtu = dev->mtu;
- ret = fill_rx_buffers(&mac_control->rings[i], 1);
+ ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
rx_buf->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
dev_kfree_skb_any(rx_buf->skb);
rx_buf->skb = NULL;
return -EIO;
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
0, efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
return -EIO;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6..5e8374ab28ee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(dma_addr)))
+ if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
goto pci_err;
/* Store fields for marking in the per-fragment final
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
TSOH_BUFFER(tsoh), header_len,
PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+ if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
st->ifc.unmap_len = len;
st->ifc.len = len;
st->ifc.dma_addr = st->ifc.unmap_addr;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 711e4a8948e0..5257cf464f1a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1829,9 +1829,6 @@ static int sky2_down(struct net_device *dev)
if (netif_msg_ifdown(sky2))
printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
- /* Stop more packets from being queued */
- netif_stop_queue(dev);
-
/* Disable port IRQ */
imask = sky2_read32(hw, B0_IMSK);
imask &= ~portirq_msk[port];
@@ -1887,8 +1884,6 @@ static int sky2_down(struct net_device *dev)
sky2_phy_power_down(hw, port);
- netif_carrier_off(dev);
-
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9..b6435d0d71f9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* iommu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
if (netif_msg_rx_err(card) && net_ratelimit())
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
unsigned long flags;
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(buf)) {
+ if (pci_dma_mapping_error(card->pdev, buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14..8487ace9d2e3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
return NULL;
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
free_page((unsigned long)buf);
return NULL;
}
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
return NULL;
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(*dma_handle)) {
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
dev_kfree_skb_any(skb);
return NULL;
}
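/*
 * Note the cleanup symmetry in both tc35815 helpers: a cookie that fails
 * pci_dma_mapping_error() must never be handed back to pci_unmap_single(),
 * so each helper frees exactly what it allocated (free_page() for the raw
 * page, dev_kfree_skb_any() for the skb) and returns NULL to its caller.
 */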
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c28d7cb2035b..0196a0df9021 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
@@ -54,9 +55,15 @@ struct virtnet_info
struct tasklet_struct tasklet;
bool free_in_tasklet;
+ /* I like... big packets and I cannot lie! */
+ bool big_packets;
+
/* Receive & send queues. */
struct sk_buff_head recv;
struct sk_buff_head send;
+
+ /* Chain pages by the private ptr. */
+ struct page *pages;
};
static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}
+static void give_a_page(struct virtnet_info *vi, struct page *page)
+{
+ page->private = (unsigned long)vi->pages;
+ vi->pages = page;
+}
+
+static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+{
+ struct page *p = vi->pages;
+
+ if (p)
+ vi->pages = (struct page *)p->private;
+ else
+ p = alloc_page(gfp_mask);
+ return p;
+}
+
static void skb_xmit_done(struct virtqueue *svq)
{
struct virtnet_info *vi = svq->vdev->priv;
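/*
 * give_a_page()/get_a_page() form a LIFO free list threaded through
 * page->private, so receive pages recycled by receive_skb() are reused
 * before alloc_page() is tried again. A usage sketch (the surrounding
 * code is illustrative, not from this patch):
 */
	struct page *p = get_a_page(vi, GFP_ATOMIC);	/* pop, or allocate */

	if (p) {
		/* ... attach p to an rx skb as a fragment ... */
		give_a_page(vi, p);	/* push it back once recycled */
	}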
@@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ int err;
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
goto drop;
}
len -= sizeof(struct virtio_net_hdr);
- BUG_ON(len > MAX_PACKET_LEN);
- skb_trim(skb, len);
+ if (len <= MAX_PACKET_LEN) {
+ unsigned int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
+ skb->data_len = 0;
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+
+ err = pskb_trim(skb, len);
+ if (err) {
+ pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
+ dev->stats.rx_dropped++;
+ goto drop;
+ }
+ skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
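/*
 * Two details in receive_skb() worth noting: skb_trim() only handles
 * linear data, so once page fragments may be attached the driver must
 * use pskb_trim(), which can fail and therefore gains an error path; and
 * truesize is bumped by data_len so that fragment pages staying attached
 * to a big packet are charged to the receiving socket.
 */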
@@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err;
+ int num, err, i;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
@@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi)
skb_put(skb, MAX_PACKET_LEN);
vnet_hdr_to_sg(sg, skb);
+
+ if (vi->big_packets) {
+ for (i = 0; i < MAX_SKB_FRAGS; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ f->page = get_a_page(vi, GFP_ATOMIC);
+ if (!f->page)
+ break;
+
+ f->page_offset = 0;
+ f->size = PAGE_SIZE;
+
+ skb->data_len += PAGE_SIZE;
+ skb->len += PAGE_SIZE;
+
+ skb_shinfo(skb)->nr_frags++;
+ }
+ }
+
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
skb_queue_head(&vi->recv, skb);
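/*
 * With big_packets set, try_fill_recv() tops every receive buffer up with
 * whole pages, one per available skb fragment slot, so a GSO packet of up
 * to roughly 64KB fits in a single skb. The resulting scatterlist layout:
 *
 *	sg[0]		virtio_net_hdr (from vnet_hdr_to_sg())
 *	sg[1]		the linear area, MAX_PACKET_LEN bytes
 *	sg[2..n]	one PAGE_SIZE fragment each
 *
 * GFP_ATOMIC allocation may run dry mid-loop; the break posts a shorter
 * buffer instead of failing the fill.
 */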
@@ -335,16 +391,11 @@ again:
free_old_xmit_skbs(vi);
/* If we have a buffer left over from last time, send it now. */
- if (unlikely(vi->last_xmit_skb)) {
- if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
- /* Drop this skb: we only queue one. */
- vi->dev->stats.tx_dropped++;
- kfree_skb(skb);
- skb = NULL;
- goto stop_queue;
- }
- vi->last_xmit_skb = NULL;
- }
+ if (unlikely(vi->last_xmit_skb) &&
+ xmit_skb(vi, vi->last_xmit_skb) != 0)
+ goto stop_queue;
+
+ vi->last_xmit_skb = NULL;
/* Put new one in send queue and do transmit */
if (likely(skb)) {
@@ -370,6 +421,11 @@ stop_queue:
netif_start_queue(dev);
goto again;
}
+ if (skb) {
+ /* Drop this skb: we only queue one. */
+ vi->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ }
goto done;
}
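/*
 * The restructuring above funnels the "we only queue one leftover skb"
 * drop into a single place: if the retry under stop_queue still cannot
 * hand last_xmit_skb to the host, the freshly submitted skb is dropped
 * here rather than inside the fast path, which lets the fast path
 * collapse to the three-line form in the previous hunk.
 */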
@@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
+static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
+ return -ENOSYS;
+
+ return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops virtnet_ethtool_ops = {
+ .set_tx_csum = virtnet_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+};
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
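/*
 * virtnet_set_tx_csum() only allows checksum offload to be switched on
 * when the host negotiated VIRTIO_NET_F_CSUM, then defers to
 * ethtool_op_set_tx_hw_csum() to set NETIF_F_HW_CSUM. From userspace the
 * two ops are reached with something like (device name illustrative):
 *
 *	ethtool -K eth0 tx on
 *	ethtool -K eth0 sg on
 */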
@@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev)
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = virtnet_netpoll;
#endif
+ SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
@@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
+ vi->pages = NULL;
/* If they give us a callback when all buffers are done, we don't need
* the timer. */
vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
+ || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
+ || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ vi->big_packets = true;
+
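+ /*
+ * Rationale for the feature test above: negotiating any of GUEST_TSO4,
+ * GUEST_TSO6 or GUEST_ECN means the host may deliver a GSO packet far
+ * larger than MAX_PACKET_LEN, so every posted receive buffer must
+ * already carry the extra pages that try_fill_recv() attaches.
+ */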
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
@@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
vdev->config->del_vq(vi->svq);
vdev->config->del_vq(vi->rvq);
unregister_netdev(vi->dev);
+
+ while (vi->pages)
+ __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+
free_netdev(vi->dev);
}
@@ -553,7 +637,9 @@ static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
- VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+ VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
+ VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a9..d9769c527346 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
bf->skb = skb;
bf->skbaddr = pci_map_single(sc->pdev,
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+ if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
bf->skb = NULL;
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
- if (pci_dma_mapping_error(bf->skbaddr)) {
+ if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
return -EIO;
}