diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 39155d7cc8946ead49d2a55ebec37006ac0d95a1..c1330ee622f01927d2220bc6c3e9c168b0118f2b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -556,8 +556,9 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 	size_t len, total_len = 0;
 	int err;
 	int sent_pkts = 0;
+	bool next_round = false;
 
-	for (;;) {
+	do {
 		bool busyloop_intr = false;
 
 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -598,11 +599,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 				 err, len);
 		if (++nvq->done_idx >= VHOST_NET_BATCH)
 			vhost_net_signal_used(nvq);
-		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+	if (next_round)
+		vhost_poll_queue(&vq->poll);
 
 	vhost_net_signal_used(nvq);
 }
@@ -625,8 +625,9 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
 	bool zcopy_used;
 	int sent_pkts = 0;
+	bool next_round = false;
 
-	for (;;) {
+	do {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
@@ -701,11 +702,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 		else
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
-		if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			break;
-		}
-	}
+	} while (!(next_round = vhost_exceeds_weight(++sent_pkts, total_len)));
+
+	if (next_round)
+		vhost_poll_queue(&vq->poll);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -922,6 +922,7 @@ static void handle_rx(struct vhost_net *net)
 	struct iov_iter fixup;
 	__virtio16 num_buffers;
 	int recv_pkts = 0;
+	bool next_round = false;
 
 	mutex_lock_nested(&vq->mutex, 0);
 	sock = vq->private_data;
@@ -941,8 +942,11 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
-						      &busyloop_intr))) {
+	do {
+		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+						      &busyloop_intr);
+		if (!sock_len)
+			break;
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1027,12 +1031,9 @@ static void handle_rx(struct vhost_net *net)
 			vhost_log_write(vq, vq_log, log, vhost_len,
 					vq->iov, in);
 		total_len += vhost_len;
-		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
-			vhost_poll_queue(&vq->poll);
-			goto out;
-		}
-	}
-	if (unlikely(busyloop_intr))
+	} while (!(next_round = vhost_exceeds_weight(++recv_pkts, total_len)));
+
+	if (unlikely(busyloop_intr || next_round))
 		vhost_poll_queue(&vq->poll);
 	else
 		vhost_net_enable_vq(net, vq);
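
For illustration only, not part of the patch: all three handlers get the same control-flow change, where the open-coded for (;;) with an in-loop weight check plus break/goto becomes a do/while whose condition records in next_round whether the weight budget was hit, so the requeue decision is made exactly once after the loop. Below is a minimal stand-alone userspace sketch of that pattern; exceeds_weight(), the fixed per-packet cost and the printf() calls are made-up stand-ins for vhost_exceeds_weight() and the real per-packet work, not vhost code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for vhost_exceeds_weight(): stop after a fixed budget. */
static bool exceeds_weight(int pkts, int total_len)
{
	return pkts >= 4 || total_len >= 1000;
}

static void handle_queue(void)
{
	int sent_pkts = 0, total_len = 0;
	bool next_round = false;

	/*
	 * Old shape:
	 *	for (;;) {
	 *		...process one packet...
	 *		if (exceeds_weight(++sent_pkts, total_len)) {
	 *			requeue();
	 *			break;
	 *		}
	 *	}
	 *
	 * New shape: the weight check becomes the loop condition and the
	 * "budget exhausted" fact is kept in next_round, so the requeue
	 * decision is taken once, after the loop.
	 */
	do {
		total_len += 100;	/* ...process one packet... */
		printf("processed packet %d, total_len %d\n",
		       sent_pkts + 1, total_len);
	} while (!(next_round = exceeds_weight(++sent_pkts, total_len)));

	if (next_round)
		printf("weight exceeded: requeue the handler\n");
}

int main(void)
{
	handle_queue();
	return 0;
}

Any other exit taken inside the loop body (short read, no buffers, error paths) leaves next_round false, so only the weight-exceeded case requeues, matching the behaviour of the removed break/goto branches.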