Commit 5669655a authored by Victor Kaplansky, committed by Michael S. Tsirkin

vhost-user interrupt management fixes

Since guest_notifier_mask cannot be used in vhost-user mode, due to
the buffering implied by the unix control socket, clear
use_guest_notifier_mask on virtio devices backing vhost-user
interfaces, and send the correct callfd to the guest at vhost start.

Using the guest_notifier_mask function in the vhost-user case may
break the interrupt masking paradigm: the mask/unmask is not actually
in effect when the guest_notifier_mask call returns. Instead, a
message is posted to the unix socket and processed later.
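To make the ordering problem concrete, here is a minimal, self-contained C sketch (not QEMU source; the message layout and struct are simplified stand-ins for the real vhost/vhost-user definitions) of why kernel vhost masking is synchronous while vhost-user masking is not:

    /* Minimal sketch, not QEMU source: contrasts the two masking paths.
     * The struct and message serialization are simplified stand-ins. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    struct vring_file { unsigned int index; int fd; };

    /* Kernel vhost: an ioctl on the vhost device.  The call fd has been
     * switched by the time the ioctl returns, so masking is synchronous. */
    static long mask_kernel_vhost(int vhost_fd, unsigned long set_vring_call_req,
                                  struct vring_file *file)
    {
        return ioctl(vhost_fd, set_vring_call_req, file);
    }

    /* vhost-user: the same request becomes a message written to the unix
     * control socket.  send() returning only means the message was
     * buffered; the backend applies it later, so the mask is NOT yet in
     * effect when this function returns. */
    static long mask_vhost_user(int sock_fd, uint32_t set_vring_call_req,
                                const struct vring_file *file)
    {
        uint8_t msg[32];
        memset(msg, 0, sizeof(msg));
        memcpy(msg, &set_vring_call_req, sizeof(set_vring_call_req));
        memcpy(msg + sizeof(set_vring_call_req), file, sizeof(*file));
        /* The real protocol also passes the eventfd via SCM_RIGHTS. */
        return send(sock_fd, msg, sizeof(msg), 0);
    }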

Add a boolean flag 'use_guest_notifier_mask' to VirtIODevice to
disable the use of guest_notifier_mask in virtio-pci.
Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
Signed-off-by: Victor Kaplansky <victork@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Parent cefa2bbd
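Before reading the diff: the heart of the change is a per-device flag consulted on every masking path in virtio-pci. A condensed, self-contained sketch of that gating pattern follows, with simplified stand-in types rather than QEMU's real VirtIODevice/VirtioDeviceClass:

    /* Sketch of the gating pattern the patch adds to virtio-pci.
     * Types are simplified stand-ins, not QEMU's. */
    #include <stdbool.h>

    typedef struct Dev {
        bool use_guest_notifier_mask;   /* new flag; virtio_init sets it true */
    } Dev;

    typedef struct DevClass {
        /* Optional per-class hook that flips the masked/unmasked eventfd. */
        void (*guest_notifier_mask)(Dev *dev, int queue_no, bool mask);
    } DevClass;

    static void vector_mask(Dev *dev, DevClass *k, int queue_no, bool mask)
    {
        if (dev->use_guest_notifier_mask && k->guest_notifier_mask) {
            /* Synchronous path: safe only when masking truly takes effect
             * before the call returns (kernel vhost, plain virtio). */
            k->guest_notifier_mask(dev, queue_no, mask);
        } else {
            /* vhost-user path: the flag was cleared in vhost_net_start, so
             * virtio-pci instead releases/re-creates the irqfd route,
             * which does take effect immediately. */
        }
    }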
@@ -284,8 +284,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (i = 0; i < total_queues; i++) {
-        vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
-    }
+        struct vhost_net *net;
+
+        net = get_vhost_net(ncs[i].peer);
+        vhost_net_set_vq_index(net, i * 2);
+
+        /* Suppress the masking guest notifiers on vhost user
+         * because vhost user doesn't interrupt masking/unmasking
+         * properly.
+         */
+        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+            dev->use_guest_notifier_mask = false;
+        }
+    }
 
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
...
@@ -875,6 +875,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     /* Clear and discard previous events if any. */
     event_notifier_test_and_clear(&vq->masked_notifier);
 
+    /* Init vring in unmasked state, unless guest_notifier_mask
+     * will do it later.
+     */
+    if (!vdev->use_guest_notifier_mask) {
+        /* TODO: check and handle errors. */
+        vhost_virtqueue_mask(dev, vdev, idx, false);
+    }
+
     return 0;
 
 fail_kick:
@@ -1167,6 +1175,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
     struct vhost_vring_file file;
 
     if (mask) {
+        assert(vdev->use_guest_notifier_mask);
         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
     } else {
         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
...
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        vdev->use_guest_notifier_mask &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
...
@@ -1677,6 +1677,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                      vdev);
     vdev->device_endian = virtio_default_endian();
+    vdev->use_guest_notifier_mask = true;
 }
 
 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
...
@@ -90,6 +90,7 @@ struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    bool use_guest_notifier_mask;
     QLIST_HEAD(, VirtQueue) *vector_queues;
 };
...