-rw-r--r--  drivers/block/virtio_blk.c       |  1
-rw-r--r--  drivers/net/virtio_net.c         | 22
-rw-r--r--  drivers/virtio/virtio_balloon.c  |  4
-rw-r--r--  drivers/virtio/virtio_pci.c      | 15
-rw-r--r--  drivers/virtio/virtio_ring.c     |  1
-rw-r--r--  include/linux/virtio.h           |  5
6 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3b1a68d6eddb..0cfbe8c594a5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -238,6 +238,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->first_minor = index_to_minor(index);
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
+ vblk->disk->driverfs_dev = &vdev->dev;
index++;
/* If barriers are supported, tell block layer that queue is ordered */
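The single virtio_blk line is a sysfs hookup: with driverfs_dev pointing at the virtio device before add_disk() runs, the block core can link the disk to the device that provides it (e.g. /sys/block/vda/device). A minimal sketch of the pattern, with disk and parent as stand-in names:

/* Sketch only: link the gendisk to its parent device so sysfs exposes
 * the relationship; must be set before add_disk(). */
disk->driverfs_dev = parent;	/* here: &vdev->dev in the probe */
add_disk(disk);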
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 19fd4cb0ddf8..b58472cf76f8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -203,8 +203,11 @@ again:
if (received < budget) {
netif_rx_complete(vi->dev, napi);
if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
- && netif_rx_reschedule(vi->dev, napi))
+ && napi_schedule_prep(napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __netif_rx_schedule(vi->dev, napi);
goto again;
+ }
}
return received;
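This hunk closes a wakeup race in the receive poll routine. netif_rx_complete() stops polling and enable_cb() re-arms the virtqueue callback; a false return means buffers slipped in during that window. Instead of trusting netif_rx_reschedule(), the driver now reclaims the poll explicitly and, because enable_cb() no longer turns callbacks back off on failure (see the virtio_ring hunk below), disables them itself before looping. The same lines again, annotated:

netif_rx_complete(vi->dev, napi);	/* stop NAPI polling */
if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
    && napi_schedule_prep(napi)) {
	/* enable_cb() lost the race: buffers are pending and callbacks
	 * are still on. We own the poll again, so turn callbacks off
	 * and go back to the receive loop. */
	vi->rvq->vq_ops->disable_cb(vi->rvq);
	__netif_rx_schedule(vi->dev, napi);
	goto again;
}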
@@ -278,10 +281,11 @@ again:
pr_debug("%s: virtio not prepared to send\n", dev->name);
netif_stop_queue(dev);
- /* Activate callback for using skbs: if this fails it
+ /* Activate callback for using skbs: if this returns false it
* means some were used in the meantime. */
if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- printk("Unlikely: restart svq failed\n");
+ printk("Unlikely: restart svq race\n");
+ vi->svq->vq_ops->disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
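The transmit path has the same window. When the ring fills, the queue is stopped and enable_cb() on the send queue reports whether completions arrived in the meantime; under the new semantics a false return leaves callbacks enabled, so the driver must disable them again before reopening the queue and retrying. The ring-full path of start_xmit(), annotated:

netif_stop_queue(dev);
/* enable_cb() == false: some buffers completed in the meantime. */
if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
	/* Callbacks stayed enabled on the false path; turn them off,
	 * reopen the queue and retry queueing the skb. */
	vi->svq->vq_ops->disable_cb(vi->svq);
	netif_start_queue(dev);
	goto again;
}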
@@ -294,6 +298,15 @@ again:
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ napi_schedule(&vi->napi);
+}
+#endif
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -336,6 +349,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->stop = virtnet_close;
dev->hard_start_xmit = start_xmit;
dev->features = NETIF_F_HIGHDMA;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = virtnet_netpoll;
+#endif
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
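virtnet_netpoll() exists for netconsole and similar users that must drive the NIC with interrupts disabled: scheduling NAPI is sufficient because the poll routine does all the real work. This tree assigns hooks directly on struct net_device; as an aside, a hedged sketch of the same wiring through net_device_ops, the scheme later kernels use (an assumption, not part of this patch):

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);	/* poll routine does the work */
}
#endif

static const struct net_device_ops virtnet_netdev_ops = {
	.ndo_open		= virtnet_open,
	.ndo_stop		= virtnet_close,
	.ndo_start_xmit		= start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= virtnet_netpoll,
#endif
};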
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c8a4332d1132..0b3efc31ee6d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -152,7 +152,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
wake_up(&vb->config_change);
}
-static inline int towards_target(struct virtio_balloon *vb)
+static inline s64 towards_target(struct virtio_balloon *vb)
{
u32 v;
__virtio_config_val(vb->vdev,
@@ -176,7 +176,7 @@ static int balloon(void *_vballoon)
set_freezable();
while (!kthread_should_stop()) {
- int diff;
+ s64 diff;
try_to_freeze();
wait_event_interruptible(vb->config_change,
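The int to s64 switch in the balloon is an overflow fix: towards_target() computes the target minus the currently ballooned pages, and both are 32-bit page counts, so a difference of 2^31 pages or more wraps a 32-bit int and flips sign, i.e. the balloon would deflate when asked to inflate. A standalone userspace illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target = 0x90000000u;	/* host asks for ~2.4G pages */
	uint32_t actual = 0;		/* nothing ballooned yet */

	int32_t as_int = (int32_t)(target - actual);	/* wraps negative */
	int64_t as_s64 = (int64_t)target - (int64_t)actual;

	/* Prints "int: -1879048192  s64: 2415919104": the 32-bit result
	 * has the wrong sign, the 64-bit one is correct. */
	printf("int: %d  s64: %lld\n", as_int, (long long)as_s64);
	return 0;
}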
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 26f787ddd5ff..59a8f73dec73 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -177,6 +177,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
struct virtio_pci_device *vp_dev = opaque;
struct virtio_pci_vq_info *info;
irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
u8 isr;
/* reading the ISR has the effect of also clearing it so it's very
@@ -197,12 +198,12 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
drv->config_changed(&vp_dev->vdev);
}
- spin_lock(&vp_dev->lock);
+ spin_lock_irqsave(&vp_dev->lock, flags);
list_for_each_entry(info, &vp_dev->virtqueues, node) {
if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
- spin_unlock(&vp_dev->lock);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
return ret;
}
@@ -214,6 +215,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
struct virtqueue *vq;
+ unsigned long flags;
u16 num;
int err;
@@ -255,9 +257,9 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
vq->priv = info;
info->vq = vq;
- spin_lock(&vp_dev->lock);
+ spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
- spin_unlock(&vp_dev->lock);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
return vq;
@@ -274,10 +276,11 @@ static void vp_del_vq(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
+ unsigned long flags;
- spin_lock(&vp_dev->lock);
+ spin_lock_irqsave(&vp_dev->lock, flags);
list_del(&info->node);
- spin_unlock(&vp_dev->lock);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
vring_del_virtqueue(vq);
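The virtio_pci changes apply a standard locking rule: vp_dev->lock is taken by vp_interrupt() in hard-irq context, so process-context paths like vp_find_vq() and vp_del_vq() must take it with local interrupts disabled, or an interrupt arriving on the same CPU while the lock is held would spin on it forever. A minimal sketch of the rule, with a hypothetical list and lock:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* Process context: block local IRQs for as long as the lock is held. */
static void add_item(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&items_lock, flags);
	list_add(node, &items);
	spin_unlock_irqrestore(&items_lock, flags);
}

/* Hard-irq context: interrupts are already off here, so a plain
 * spin_lock() suffices. */
static irqreturn_t item_irq(int irq, void *opaque)
{
	spin_lock(&items_lock);
	/* ... walk items ... */
	spin_unlock(&items_lock);
	return IRQ_HANDLED;
}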
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 3a28c1382131..aa714028641e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -232,7 +232,6 @@ static bool vring_enable_cb(struct virtqueue *_vq)
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
mb();
if (unlikely(more_used(vq))) {
- vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
END_USE(vq);
return false;
}
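Deleting that line is the heart of the race fix. Previously enable_cb() would quietly re-disable callbacks before returning false, so a buffer the host added after the more_used() check could raise no interrupt while the driver believed callbacks were armed. Now the false path leaves callbacks enabled and the caller decides, which gives drivers the loop idiom used in the virtio_net hunks above (sketch, with process_used_buffers() as a stand-in for the driver's drain loop):

for (;;) {
	process_used_buffers(vq);
	if (vq->vq_ops->enable_cb(vq))
		break;			/* armed, and nothing pending */
	/* Lost the race: keep callbacks off while draining again. */
	vq->vq_ops->disable_cb(vq);
}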
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 260d1fcf29a4..12c18ac1b973 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -43,8 +43,9 @@ struct virtqueue
* vq: the struct virtqueue we're talking about.
* @enable_cb: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
- * This returns "false" (and doesn't re-enable) if there are pending
- * buffers in the queue, to avoid a race.
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously.
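The reworded comment makes the contract explicit: enable_cb() always re-enables callbacks, and the false return only reports that work arrived in the meantime. Combining that with the locking rule stated alongside it, a hypothetical driver callback might look like this (sketch only; mydev, consume() and the use of vdev->priv are assumptions, not from this patch):

struct mydev {
	spinlock_t lock;
	/* ... */
};

static void mydev_callback(struct virtqueue *vq)
{
	struct mydev *d = vq->vdev->priv;	/* assumed layout */
	unsigned long flags;
	unsigned int len;
	void *buf;

	spin_lock_irqsave(&d->lock, flags);	/* serialize all vq_ops */
	for (;;) {
		while ((buf = vq->vq_ops->get_buf(vq, &len)) != NULL)
			consume(buf, len);	/* stand-in */
		if (vq->vq_ops->enable_cb(vq))
			break;			/* drained and re-armed */
		vq->vq_ops->disable_cb(vq);	/* raced; drain again */
	}
	spin_unlock_irqrestore(&d->lock, flags);
}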