| author | David S. Miller <davem@davemloft.net> | 2018-06-03 09:31:58 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-06-03 09:31:58 -0400 |
| commit | 9c54aeb03a6d65a5834bd92376e921cbac6dfb8f (patch) | |
| tree | 70441095d58678711d68cfef4934765251425d1f /drivers/vhost | |
| parent | eaf47b17a77fda841a1102d76c15161ee438b347 (diff) | |
| parent | 918fe1b3157978ada4267468008c5f89ef101e7d (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Filling in the padding slot in the bpf structure as a bug fix in 'net'
overlapped with actually using that padding area for something in
'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
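
To picture the conflict, here is a toy sketch (hypothetical struct and field names, not the actual bpf uapi structure involved in this merge): the 'net' fix names the implicit hole so it is zeroed and the layout is unambiguous for userspace, while 'net-next' puts a real field in the same slot; the merge keeps the real field.

```c
#include <linux/types.h>

/* Hypothetical illustration only -- not the bpf structure from this merge. */

/* 'net' (bug fix): make the trailing 4-byte hole explicit so the
 * kernel zeroes it and the ABI layout is unambiguous.
 */
struct example_info_fix {
	__u64 id;
	__u32 flags;
	__u32 pad;		/* explicit padding, must be zero */
};

/* 'net-next' (feature): the same 4-byte slot carries real data,
 * which is what the merge resolution ends up keeping.
 */
struct example_info_next {
	__u64 id;
	__u32 flags;
	__u32 new_field;	/* reuses the former padding slot */
};
```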
Diffstat (limited to 'drivers/vhost')
| -rw-r--r-- | drivers/vhost/net.c | 37 |
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c4b49fca4871..e7cf7d21cfb5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -108,7 +108,9 @@ struct vhost_net_virtqueue {
 	/* vhost zerocopy support fields below: */
 	/* last used idx for outstanding DMA zerocopy buffers */
 	int upend_idx;
-	/* first used idx for DMA done zerocopy buffers */
+	/* For TX, first used idx for DMA done zerocopy buffers
+	 * For RX, number of batched heads
+	 */
 	int done_idx;
 	/* an array of userspace buffers info */
 	struct ubuf_info *ubuf_info;
@@ -629,6 +631,18 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_dev *dev = vq->dev;
+
+	if (!nvq->done_idx)
+		return;
+
+	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+	nvq->done_idx = 0;
+}
+
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 {
 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -638,6 +652,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 	int len = peek_head_len(rvq, sk);
 
 	if (!len && vq->busyloop_timeout) {
+		/* Flush batched heads first */
+		vhost_rx_signal_used(rvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
@@ -765,7 +781,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount, nheads = 0;
+	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -794,8 +810,8 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
-					&in, vq_log, &log,
+		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+					vhost_len, &in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
@@ -866,12 +882,9 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		nheads += headcount;
-		if (nheads > VHOST_RX_BATCH) {
-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-						    nheads);
-			nheads = 0;
-		}
+		nvq->done_idx += headcount;
+		if (nvq->done_idx > VHOST_RX_BATCH)
+			vhost_rx_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -883,9 +896,7 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
-	if (nheads)
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    nheads);
+	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
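
The patch replaces handle_rx()'s local `nheads` counter with the per-virtqueue `done_idx`, so the batched used-ring update can also be flushed from vhost_net_rx_peek_head_len() before busy polling begins; without that flush, the guest could sit waiting on buffers vhost has already completed but never signaled. A minimal, self-contained sketch of this batch-then-flush pattern (hypothetical standalone types and names, not the kernel's vhost API):

```c
#include <stddef.h>
#include <stdio.h>

#define BATCH_LIMIT 64			/* stand-in for VHOST_RX_BATCH */

struct rx_queue {
	size_t done_idx;		/* completed but not yet signaled heads */
};

/* Stand-in for vhost_add_used_and_signal_n(): publish n used heads
 * to the consumer and raise its notification.
 */
static void publish_and_signal(struct rx_queue *q, size_t n)
{
	(void)q;
	printf("signal %zu used heads\n", n);
}

/* Mirrors vhost_rx_signal_used(): flush the batch only if non-empty. */
static void flush_batched(struct rx_queue *q)
{
	if (!q->done_idx)
		return;
	publish_and_signal(q, q->done_idx);
	q->done_idx = 0;
}

/* Per-packet completion path: accumulate heads, flush once the batch
 * limit is exceeded. Any path that may spin or block (e.g. busy
 * polling) must call flush_batched() first so completions are never
 * held back from the consumer.
 */
static void complete_heads(struct rx_queue *q, size_t headcount)
{
	q->done_idx += headcount;
	if (q->done_idx > BATCH_LIMIT)
		flush_batched(q);
}

int main(void)
{
	struct rx_queue q = { 0 };

	for (int i = 0; i < 100; i++)
		complete_heads(&q, 1);
	flush_batched(&q);	/* final flush, as at handle_rx()'s out: label */
	return 0;
}
```

Batching amortizes the cost of used-ring updates and guest notifications over many packets; the subtlety this change addresses is that the batch must be drained at every point where the producer might stall.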