提交 3c1b27d5 编写于 作者: Rusty Russell

virtio: make add_buf return capacity remaining

This API change means that virtio_net can tell how much capacity
remains for buffers.  It's necessarily fuzzy, since
VIRTIO_RING_F_INDIRECT_DESC means we can fit any number of descriptors
in one, *if* we can kmalloc.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Dinesh Subhraveti <dineshs@us.ibm.com>
上级 f68d2408
...@@ -139,7 +139,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, ...@@ -139,7 +139,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
} }
} }
if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool); mempool_free(vbr, vblk->pool);
return false; return false;
} }
......
...@@ -51,7 +51,7 @@ static void register_buffer(void) ...@@ -51,7 +51,7 @@ static void register_buffer(void)
sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left); sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
/* There should always be room for one buffer. */ /* There should always be room for one buffer. */
if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) < 0)
BUG(); BUG();
vq->vq_ops->kick(vq); vq->vq_ops->kick(vq);
} }
......
...@@ -65,7 +65,7 @@ static int put_chars(u32 vtermno, const char *buf, int count) ...@@ -65,7 +65,7 @@ static int put_chars(u32 vtermno, const char *buf, int count)
/* add_buf wants a token to identify this buffer: we hand it any /* add_buf wants a token to identify this buffer: we hand it any
* non-NULL pointer, since there's only ever one buffer. */ * non-NULL pointer, since there's only ever one buffer. */
if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) { if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) {
/* Tell Host to go! */ /* Tell Host to go! */
out_vq->vq_ops->kick(out_vq); out_vq->vq_ops->kick(out_vq);
/* Chill out until it's done with the buffer. */ /* Chill out until it's done with the buffer. */
...@@ -85,7 +85,7 @@ static void add_inbuf(void) ...@@ -85,7 +85,7 @@ static void add_inbuf(void)
sg_init_one(sg, inbuf, PAGE_SIZE); sg_init_one(sg, inbuf, PAGE_SIZE);
/* We should always be able to add one buffer to an empty queue. */ /* We should always be able to add one buffer to an empty queue. */
if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) != 0) if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0)
BUG(); BUG();
in_vq->vq_ops->kick(in_vq); in_vq->vq_ops->kick(in_vq);
} }
......
...@@ -320,7 +320,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) ...@@ -320,7 +320,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
skb_queue_head(&vi->recv, skb); skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
if (err) { if (err < 0) {
skb_unlink(skb, &vi->recv); skb_unlink(skb, &vi->recv);
trim_pages(vi, skb); trim_pages(vi, skb);
kfree_skb(skb); kfree_skb(skb);
...@@ -373,7 +373,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) ...@@ -373,7 +373,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
skb_queue_head(&vi->recv, skb); skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb); err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err) { if (err < 0) {
skb_unlink(skb, &vi->recv); skb_unlink(skb, &vi->recv);
kfree_skb(skb); kfree_skb(skb);
break; break;
...@@ -527,7 +527,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) ...@@ -527,7 +527,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
if (!err && !vi->free_in_tasklet) if (err >= 0 && !vi->free_in_tasklet)
mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
return err; return err;
...@@ -538,7 +538,7 @@ static void xmit_tasklet(unsigned long data) ...@@ -538,7 +538,7 @@ static void xmit_tasklet(unsigned long data)
struct virtnet_info *vi = (void *)data; struct virtnet_info *vi = (void *)data;
netif_tx_lock_bh(vi->dev); netif_tx_lock_bh(vi->dev);
if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) { if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
vi->svq->vq_ops->kick(vi->svq); vi->svq->vq_ops->kick(vi->svq);
vi->last_xmit_skb = NULL; vi->last_xmit_skb = NULL;
} }
...@@ -557,7 +557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -557,7 +557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* If we has a buffer left over from last time, send it now. */ /* If we has a buffer left over from last time, send it now. */
if (unlikely(vi->last_xmit_skb) && if (unlikely(vi->last_xmit_skb) &&
xmit_skb(vi, vi->last_xmit_skb) != 0) xmit_skb(vi, vi->last_xmit_skb) < 0)
goto stop_queue; goto stop_queue;
vi->last_xmit_skb = NULL; vi->last_xmit_skb = NULL;
...@@ -565,7 +565,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -565,7 +565,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Put new one in send queue and do transmit */ /* Put new one in send queue and do transmit */
if (likely(skb)) { if (likely(skb)) {
__skb_queue_head(&vi->send, skb); __skb_queue_head(&vi->send, skb);
if (xmit_skb(vi, skb) != 0) { if (xmit_skb(vi, skb) < 0) {
vi->last_xmit_skb = skb; vi->last_xmit_skb = skb;
skb = NULL; skb = NULL;
goto stop_queue; goto stop_queue;
...@@ -668,7 +668,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, ...@@ -668,7 +668,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length); sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi)); BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
vi->cvq->vq_ops->kick(vi->cvq); vi->cvq->vq_ops->kick(vi->cvq);
......
...@@ -84,7 +84,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) ...@@ -84,7 +84,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked); init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */ /* We should always be able to add one buffer to an empty queue. */
if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) != 0) if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
BUG(); BUG();
vq->vq_ops->kick(vq); vq->vq_ops->kick(vq);
......
...@@ -208,7 +208,11 @@ static int vring_add_buf(struct virtqueue *_vq, ...@@ -208,7 +208,11 @@ static int vring_add_buf(struct virtqueue *_vq,
pr_debug("Added buffer head %i to %p\n", head, vq); pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq); END_USE(vq);
return 0;
/* If we're indirect, we can fit many (assuming not OOM). */
if (vq->indirect)
return vq->num_free ? vq->vring.num : 0;
return vq->num_free;
} }
static void vring_kick(struct virtqueue *_vq) static void vring_kick(struct virtqueue *_vq)
......
...@@ -34,7 +34,7 @@ struct virtqueue { ...@@ -34,7 +34,7 @@ struct virtqueue {
* out_num: the number of sg readable by other side * out_num: the number of sg readable by other side
* in_num: the number of sg which are writable (after readable ones) * in_num: the number of sg which are writable (after readable ones)
* data: the token identifying the buffer. * data: the token identifying the buffer.
* Returns 0 or an error. * Returns remaining capacity of queue (sg segments) or a negative error.
* @kick: update after add_buf * @kick: update after add_buf
* vq: the struct virtqueue * vq: the struct virtqueue
* After one or more add_buf calls, invoke this to kick the other side. * After one or more add_buf calls, invoke this to kick the other side.
......
...@@ -200,7 +200,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) ...@@ -200,7 +200,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req->status = REQ_STATUS_SENT; req->status = REQ_STATUS_SENT;
if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) { if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
P9_DPRINTK(P9_DEBUG_TRANS, P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: virtio rpc add_buf returned failure"); "9p debug: virtio rpc add_buf returned failure");
return -EIO; return -EIO;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册