Commit 54dd9321 authored by Michael S. Tsirkin

virtio: change set guest notifier to per-device

When using irqfd with vhost-net to inject interrupts,
a single eventfd might inject multiple interrupts.
Implementing this is much easier with a single
per-device callback to set guest notifiers.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Parent 010ec629
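For context, below is a small standalone C sketch (not part of the commit) of the pattern the diff introduces: the transport exposes a single per-device set_guest_notifiers callback that assigns notifiers for every virtqueue and rolls back on failure, instead of a per-queue callback. All toy_/Toy names are hypothetical stand-ins; the real signatures appear in the VirtIOBindings hunk at the end of the diff.

/* Hypothetical, self-contained illustration -- not QEMU code. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_QUEUE_MAX 4

/* Mirrors the new per-device member of VirtIOBindings. */
typedef struct {
    int (*set_guest_notifiers)(void *opaque, bool assign);
} ToyBindings;

/* Stand-in for the per-queue helper; queue 2 fails to show recovery. */
static int toy_set_guest_notifier(void *opaque, int n, bool assign)
{
    (void)opaque;
    printf("queue %d: %s guest notifier\n", n, assign ? "assign" : "release");
    return (assign && n == 2) ? -1 : 0;
}

/* Per-device implementation: walk all queues, undo the already-assigned
 * ones if any single assignment fails. */
static int toy_set_guest_notifiers(void *opaque, bool assign)
{
    int r, n;

    for (n = 0; n < TOY_QUEUE_MAX; n++) {
        r = toy_set_guest_notifier(opaque, n, assign);
        if (r < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--n >= 0) {
        toy_set_guest_notifier(opaque, n, !assign);
    }
    return r;
}

int main(void)
{
    ToyBindings binding = { .set_guest_notifiers = toy_set_guest_notifiers };

    /* A per-device caller (vhost in the real code) needs just one call. */
    return binding.set_guest_notifiers(NULL, true) < 0 ? 1 : 0;
}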
@@ -454,11 +454,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 
-    if (!vdev->binding->set_guest_notifier) {
-        fprintf(stderr, "binding does not support guest notifiers\n");
-        return -ENOSYS;
-    }
-
     if (!vdev->binding->set_host_notifier) {
         fprintf(stderr, "binding does not support host notifiers\n");
         return -ENOSYS;
@@ -511,12 +506,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         r = -errno;
         goto fail_alloc;
     }
-    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_guest_notifier;
-    }
-
     r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
     if (r < 0) {
         fprintf(stderr, "Error binding host notifier: %d\n", -r);
@@ -541,8 +530,6 @@ fail_call:
 fail_kick:
     vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
 fail_host_notifier:
-    vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
-fail_guest_notifier:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, 0);
@@ -568,13 +555,6 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
         .index = idx,
     };
     int r;
-    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
-    if (r < 0) {
-        fprintf(stderr, "vhost VQ %d guest cleanup failed: %d\n", idx, r);
-        fflush(stderr);
-    }
-    assert (r >= 0);
-
     r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
     if (r < 0) {
         fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
@@ -647,15 +627,26 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i, r;
+    if (!vdev->binding->set_guest_notifiers) {
+        fprintf(stderr, "binding does not support guest notifiers\n");
+        r = -ENOSYS;
+        goto fail;
+    }
+
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+    if (r < 0) {
+        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
+        goto fail_notifiers;
+    }
+
     r = vhost_dev_set_features(hdev, hdev->log_enabled);
     if (r < 0) {
-        goto fail;
+        goto fail_features;
     }
     r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
     if (r < 0) {
         r = -errno;
-        goto fail;
+        goto fail_mem;
     }
     for (i = 0; i < hdev->nvqs; ++i) {
         r = vhost_virtqueue_init(hdev,
@@ -675,13 +666,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
                              (uint64_t)(unsigned long)hdev->log);
         if (r < 0) {
             r = -errno;
-            goto fail_vq;
+            goto fail_log;
         }
     }
     hdev->started = true;
     return 0;
+fail_log:
 fail_vq:
     while (--i >= 0) {
         vhost_virtqueue_cleanup(hdev,
@@ -689,13 +681,18 @@ fail_vq:
                                hdev->vqs + i,
                                i);
     }
+fail_mem:
+fail_features:
+    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+fail_notifiers:
 fail:
     return r;
 }
 
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
-    int i;
+    int i, r;
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
@@ -704,6 +701,13 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
     vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                    (target_phys_addr_t)~0x0ull);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    if (r < 0) {
+        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+        fflush(stderr);
+    }
+    assert (r >= 0);
     hdev->started = false;
     qemu_free(hdev->log);
     hdev->log_size = 0;
...
@@ -451,6 +451,33 @@ static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
     return 0;
 }
 
+static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    VirtIODevice *vdev = proxy->vdev;
+    int r, n;
+
+    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+        if (!virtio_queue_get_num(vdev, n)) {
+            break;
+        }
+
+        r = virtio_pci_set_guest_notifier(opaque, n, assign);
+        if (r < 0) {
+            goto assign_error;
+        }
+    }
+
+    return 0;
+
+assign_error:
+    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
+    while (--n >= 0) {
+        virtio_pci_set_guest_notifier(opaque, n, !assign);
+    }
+    return r;
+}
+
 static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
 {
     VirtIOPCIProxy *proxy = opaque;
@@ -488,7 +515,7 @@ static const VirtIOBindings virtio_pci_bindings = {
     .load_queue = virtio_pci_load_queue,
     .get_features = virtio_pci_get_features,
     .set_host_notifier = virtio_pci_set_host_notifier,
-    .set_guest_notifier = virtio_pci_set_guest_notifier,
+    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
 };
 
 static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
...
@@ -93,7 +93,7 @@ typedef struct {
     int (*load_config)(void * opaque, QEMUFile *f);
     int (*load_queue)(void * opaque, int n, QEMUFile *f);
     unsigned (*get_features)(void * opaque);
-    int (*set_guest_notifier)(void * opaque, int n, bool assigned);
+    int (*set_guest_notifiers)(void * opaque, bool assigned);
    int (*set_host_notifier)(void * opaque, int n, bool assigned);
 } VirtIOBindings;
...