Commit a9f98bb5 authored by Jason Wang, committed by Anthony Liguori

vhost: multiqueue support

This patch lets vhost support multiqueue. The idea is simple: launch multiple
vhost threads and let each thread process a subset of the device's virtqueues.
After this change, each emulated device can have multiple vhost threads as its
backend.

To do this, a virtqueue index (vq_index) was introduced to record the first
virtqueue that will be handled by this vhost_net device. Based on this index
and nvqs, vhost can calculate its relative index when setting up the vhost_net
device.
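
As an illustration of this arithmetic, a minimal standalone sketch follows; the
struct and function names are invented for the example (the real translation
lives in vhost_virtqueue_start()/vhost_virtqueue_stop() in the diff below).

#include <assert.h>
#include <stdio.h>

/* Sketch only: each vhost thread owns nvqs virtqueues starting at vq_index,
 * and the kernel side only ever sees the index relative to that base. */
struct vhost_dev_sketch {
    int vq_index;   /* first device-wide virtqueue owned by this vhost thread */
    int nvqs;       /* number of virtqueues owned by this vhost thread */
};

static int vhost_relative_index(const struct vhost_dev_sketch *dev, int idx)
{
    /* idx is the device-wide virtqueue number (0,1 for queue pair 0;
     * 2,3 for queue pair 1; ...). */
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    return idx - dev->vq_index;
}

int main(void)
{
    struct vhost_dev_sketch second_pair = { .vq_index = 2, .nvqs = 2 };

    /* Device virtqueue 3 is the second queue of the second vhost thread,
     * so the kernel is told about vring index 1. */
    printf("%d\n", vhost_relative_index(&second_pair, 3));
    return 0;
}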

Since we may have many vhost/vhost_net devices for a single virtio-net device,
the setting of guest notifiers was moved out of the starting/stopping of a
specific vhost thread. The old vhost_net_{start|stop}() were renamed to
vhost_net_{start|stop}_one(), and new vhost_net_{start|stop}() functions were
introduced to configure the guest notifiers and start/stop all vhost/vhost_net
devices.
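
A standalone sketch of the resulting start sequence follows; the helper names
here are assumptions for the example rather than QEMU APIs, and error unwinding
is omitted (the real code is in the new vhost_net_start()/vhost_net_stop() in
the vhost_net.c hunks below).

#include <stdio.h>

/* Illustrative stand-ins for vhost_net_start_one() and the binding's
 * set_guest_notifiers() callback; these are assumptions for the sketch. */
static int start_one_backend(int vq_index)
{
    printf("start vhost backend owning virtqueues %d and %d\n",
           vq_index, vq_index + 1);
    return 0;
}

static int bind_guest_notifiers(int nvqs)
{
    printf("bind guest notifiers for %d virtqueues\n", nvqs);
    return 0;
}

/* Sketch of the new flow: one vhost_net backend per queue pair, then a
 * single guest-notifier setup covering all of them. */
static int start_all(int total_queues)
{
    int i, r;

    for (i = 0; i < total_queues; i++) {
        /* queue pair i owns device virtqueues i * 2 and i * 2 + 1 */
        r = start_one_backend(i * 2);
        if (r < 0) {
            return r;
        }
    }
    return bind_guest_notifiers(total_queues * 2);
}

int main(void)
{
    return start_all(2) < 0 ? 1 : 0;
}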
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Parent 264986e2
@@ -616,14 +616,17 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
 {
     hwaddr s, l, a;
     int r;
+    int vhost_vq_index = idx - dev->vq_index;
     struct vhost_vring_file file = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 
+    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
+
     vq->num = state.num = virtio_queue_get_num(vdev, idx);
     r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
     if (r) {
@@ -666,11 +669,12 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
         goto fail_alloc_ring;
     }
 
-    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
+    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
     if (r < 0) {
         r = -errno;
         goto fail_alloc;
     }
+
     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
     r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
     if (r) {
@@ -706,9 +710,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                     unsigned idx)
 {
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = idx - dev->vq_index
     };
     int r;
+    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
     r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
     if (r < 0) {
         fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
@@ -864,7 +869,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             true);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
             goto fail_vq;
@@ -874,7 +881,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     return 0;
 fail_vq:
     while (--i >= 0) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
             fflush(stderr);
@@ -895,7 +904,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
             fflush(stderr);
@@ -909,8 +920,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
  */
 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
 {
-    struct vhost_virtqueue *vq = hdev->vqs + n;
+    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
     assert(hdev->started);
+    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
     return event_notifier_test_and_clear(&vq->masked_notifier);
 }
 
@@ -919,15 +931,16 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                           bool mask)
 {
     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
-    int r;
+    int r, index = n - hdev->vq_index;
 
     assert(hdev->started);
+    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
 
     struct vhost_vring_file file = {
-        .index = n,
+        .index = index
     };
     if (mask) {
-        file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
+        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
     } else {
         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
     }
@@ -942,20 +955,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 
     hdev->started = true;
 
-    if (!vdev->binding->set_guest_notifiers) {
-        fprintf(stderr, "binding does not support guest notifiers\n");
-        r = -ENOSYS;
-        goto fail;
-    }
-
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
-                                           hdev->nvqs,
-                                           true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_notifiers;
-    }
-
     r = vhost_dev_set_features(hdev, hdev->log_enabled);
     if (r < 0) {
         goto fail_features;
@@ -967,9 +966,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
     for (i = 0; i < hdev->nvqs; ++i) {
         r = vhost_virtqueue_start(hdev,
                                   vdev,
                                   hdev->vqs + i,
-                                  i);
+                                  hdev->vq_index + i);
         if (r < 0) {
             goto fail_vq;
         }
@@ -992,15 +991,13 @@ fail_log:
 fail_vq:
     while (--i >= 0) {
         vhost_virtqueue_stop(hdev,
                              vdev,
                              hdev->vqs + i,
-                             i);
+                             hdev->vq_index + i);
     }
+    i = hdev->nvqs;
 fail_mem:
 fail_features:
-    vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
-fail_notifiers:
-fail:
     hdev->started = false;
     return r;
@@ -1009,29 +1006,22 @@ fail:
 /* Host notifiers must be enabled at this point. */
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
-    int i, r;
+    int i;
 
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_stop(hdev,
                              vdev,
                              hdev->vqs + i,
-                             i);
+                             hdev->vq_index + i);
     }
     for (i = 0; i < hdev->n_mem_sections; ++i) {
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
-                                           hdev->nvqs,
-                                           false);
-    if (r < 0) {
-        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
-        fflush(stderr);
-    }
-    assert (r >= 0);
 
     hdev->started = false;
     g_free(hdev->log);
     hdev->log = NULL;
     hdev->log_size = 0;
 }
@@ -35,6 +35,8 @@ struct vhost_dev {
     MemoryRegionSection *mem_sections;
     struct vhost_virtqueue *vqs;
     int nvqs;
+    /* the first virtuque which would be used by this vhost dev */
+    int vq_index;
     unsigned long long features;
     unsigned long long acked_features;
     unsigned long long backend_features;
......
@@ -140,12 +140,21 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return vhost_dev_query(&net->dev, dev);
 }
 
-int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+static int vhost_net_start_one(struct vhost_net *net,
+                               VirtIODevice *dev,
+                               int vq_index)
 {
     struct vhost_vring_file file = { };
     int r;
 
+    if (net->dev.started) {
+        return 0;
+    }
+
+    net->dev.nvqs = 2;
+    net->dev.vqs = net->vqs;
+    net->dev.vq_index = vq_index;
+
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
         goto fail_notifiers;
@@ -181,11 +190,15 @@ fail_notifiers:
     return r;
 }
 
-void vhost_net_stop(struct vhost_net *net,
-                    VirtIODevice *dev)
+static void vhost_net_stop_one(struct vhost_net *net,
+                               VirtIODevice *dev)
 {
     struct vhost_vring_file file = { .fd = -1 };
 
+    if (!net->dev.started) {
+        return;
+    }
+
     for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
         int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
         assert(r >= 0);
@@ -195,6 +208,61 @@ void vhost_net_stop(struct vhost_net *net,
     vhost_dev_disable_notifiers(&net->dev, dev);
 }
 
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int r, i = 0;
+
+    if (!dev->binding->set_guest_notifiers) {
+        error_report("binding does not support guest notifiers\n");
+        r = -ENOSYS;
+        goto err;
+    }
+
+    for (i = 0; i < total_queues; i++) {
+        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
+
+        if (r < 0) {
+            goto err;
+        }
+    }
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          true);
+    if (r < 0) {
+        error_report("Error binding guest notifier: %d\n", -r);
+        goto err;
+    }
+
+    return 0;
+
+err:
+    while (--i >= 0) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+    return r;
+}
+
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int total_queues)
+{
+    int i, r;
+
+    r = dev->binding->set_guest_notifiers(dev->binding_opaque,
+                                          total_queues * 2,
+                                          false);
+    if (r < 0) {
+        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+        fflush(stderr);
+    }
+    assert(r >= 0);
+
+    for (i = 0; i < total_queues; i++) {
+        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
+    }
+}
+
 void vhost_net_cleanup(struct vhost_net *net)
 {
     vhost_dev_cleanup(&net->dev);
@@ -224,13 +292,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
     return false;
 }
 
-int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+int vhost_net_start(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
     return -ENOSYS;
 }
 
-void vhost_net_stop(struct vhost_net *net,
-                    VirtIODevice *dev)
+void vhost_net_stop(VirtIODevice *dev,
+                    NetClientState *ncs,
+                    int total_queues)
 {
 }
......
@@ -9,8 +9,8 @@ typedef struct vhost_net VHostNetState;
 VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);
 
 bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
-int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
-void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
 
 void vhost_net_cleanup(VHostNetState *net);
......
@@ -130,14 +130,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
             return;
         }
         n->vhost_started = 1;
-        r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev);
+        r = vhost_net_start(&n->vdev, nc, 1);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
+        vhost_net_stop(&n->vdev, nc, 1);
         n->vhost_started = 0;
     }
 }
......