Commit 9b4742a6 authored by Eric Auger, committed by Zheng Zengkai

vfio/pci: Inject page response upon response region fill

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I401IF
CVE: NA

------------------------------

When userspace increments the head of the page response
buffer ring, let's push the response into the IOMMU layer.
This is done through a workqueue that pops the responses
from the ring buffer and increments the tail.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent cbbf4b3a
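The protocol implied by the diff below: userspace owns the head index of a circular buffer of page-response records, the kernel owns the tail, and a head update from userspace kicks a workqueue that drains the ring. For orientation, here is a minimal userspace-side sketch of the producer half. The struct and helper names are illustrative, and the entry layout is assumed to mirror the uapi struct iommu_page_response of this kernel era; none of this is defined by the commit itself.

#include <stdint.h>
#include <string.h>
#include <linux/iommu.h>	/* uapi struct iommu_page_response (5.x-era trees) */

struct resp_ring {
	uint8_t *entries;	/* mmap'ed ring: nb_entries * entry_size bytes */
	uint32_t nb_entries;	/* header->nb_entries, 512 in this commit */
	uint32_t entry_size;	/* header->entry_size */
	uint32_t head;		/* producer index, owned by userspace */
};

/*
 * Copy one response into the slot at head. `tail` is a snapshot of the
 * kernel-owned consumer index read from the region header. Publishing
 * the new head must go through a write() to the region (see
 * vfio_pci_dma_fault_response_rw below), which is what queues the
 * kernel's inject work.
 */
static int ring_push(struct resp_ring *r, uint32_t tail,
		     const struct iommu_page_response *resp)
{
	uint32_t next = (r->head + 1) % r->nb_entries;

	if (next == tail)	/* ring full: kernel has not consumed yet */
		return -1;

	memcpy(r->entries + (size_t)r->head * r->entry_size, resp, sizeof(*resp));
	r->head = next;
	return 0;
}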
@@ -552,6 +552,32 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev)
	return ret;
}

static void dma_response_inject(struct work_struct *work)
{
	struct vfio_pci_dma_fault_response_work *rwork =
		container_of(work, struct vfio_pci_dma_fault_response_work, inject);
	struct vfio_region_dma_fault_response *header = rwork->header;
	struct vfio_pci_device *vdev = rwork->vdev;
	struct iommu_page_response *resp;
	u32 tail, head, size;

	mutex_lock(&vdev->fault_response_queue_lock);

	tail = header->tail;
	head = header->head;
	size = header->nb_entries;

	while (CIRC_CNT(head, tail, size) >= 1) {
		resp = (struct iommu_page_response *)(vdev->fault_response_pages + header->offset +
						      tail * header->entry_size);

		/* TODO: properly handle the return value */
		iommu_page_response(&vdev->pdev->dev, resp);
		header->tail = tail = (tail + 1) % size;
	}
	mutex_unlock(&vdev->fault_response_queue_lock);
}

#define DMA_FAULT_RESPONSE_RING_LENGTH 512

static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
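CIRC_CNT(), used in dma_response_inject() above, comes from include/linux/circ_buf.h and computes ring occupancy as ((head) - (tail)) & ((size) - 1), which is exact only when size is a power of two; the ring length of 512 defined above satisfies that. A standalone check of the same arithmetic:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the kernel's CIRC_CNT() in include/linux/circ_buf.h. */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	uint32_t size = 512;	/* DMA_FAULT_RESPONSE_RING_LENGTH: a power of two */

	assert(CIRC_CNT((uint32_t)5, (uint32_t)2, size) == 3);	/* simple case */
	assert(CIRC_CNT((uint32_t)1, (uint32_t)510, size) == 3);	/* wrap-around */
	return 0;
}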
@@ -597,8 +623,22 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
	header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH;
	header->offset = PAGE_SIZE;

	vdev->response_work = kzalloc(sizeof(*vdev->response_work), GFP_KERNEL);
	if (!vdev->response_work) {
		ret = -ENOMEM;	/* ret may still hold 0 from the earlier region setup */
		goto out;
	}
	vdev->response_work->header = header;
	vdev->response_work->vdev = vdev;

	/* launch the thread that will extract the response */
	INIT_WORK(&vdev->response_work->inject, dma_response_inject);
	vdev->dma_fault_response_wq =
		create_singlethread_workqueue("vfio-dma-fault-response");
	if (!vdev->dma_fault_response_wq) {
		ret = -ENOMEM;	/* returning directly would leak response_work and the ring pages */
		goto out_free_work;
	}

	return 0;
out_free_work:
	kfree(vdev->response_work);
	vdev->response_work = NULL;
out:
	kfree(vdev->fault_response_pages);
	vdev->fault_response_pages = NULL;
	return ret;
}
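This excerpt covers only setup and injection; the matching teardown is not shown. As a sketch only, assuming the release path mirrors the init path (the function name is hypothetical and not part of this diff), it would need to destroy the workqueue before freeing the work item and the ring pages:

/* Hypothetical counterpart to vfio_pci_dma_fault_response_init(). */
static void vfio_pci_dma_fault_response_release(struct vfio_pci_device *vdev)
{
	if (vdev->dma_fault_response_wq) {
		/* destroy_workqueue() drains any pending inject work first */
		destroy_workqueue(vdev->dma_fault_response_wq);
		vdev->dma_fault_response_wq = NULL;
	}
	kfree(vdev->response_work);
	vdev->response_work = NULL;
	kfree(vdev->fault_response_pages);
	vdev->fault_response_pages = NULL;
}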
@@ -52,6 +52,12 @@ struct vfio_pci_irq_ctx {
	struct irq_bypass_producer producer;
};

struct vfio_pci_dma_fault_response_work {
	struct work_struct inject;
	struct vfio_region_dma_fault_response *header;
	struct vfio_pci_device *vdev;
};

struct vfio_pci_device;
struct vfio_pci_region;
@@ -146,6 +152,7 @@ struct vfio_pci_device {
	u8 *fault_pages;
	u8 *fault_response_pages;
	struct workqueue_struct *dma_fault_response_wq;
	struct vfio_pci_dma_fault_response_work *response_work;
	struct mutex fault_queue_lock;
	struct mutex fault_response_queue_lock;
	struct list_head dummy_resources_list;
@@ -430,6 +430,7 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user
		mutex_lock(&vdev->fault_response_queue_lock);
		header->head = new_head;
		mutex_unlock(&vdev->fault_response_queue_lock);

		queue_work(vdev->dma_fault_response_wq, &vdev->response_work->inject);
	} else {
		if (copy_to_user(buf, base + pos, count))
			return -EFAULT;
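This write path is what makes a userspace head update visible: the VMM rewrites the header's head field through the region's read/write offset rather than through the mmap, and the handler above queues the inject work in response. A hedged userspace sketch follows; the region offset comes from VFIO_DEVICE_GET_REGION_INFO and the field offset from struct vfio_region_dma_fault_response, both treated as placeholders here.

#include <stdint.h>
#include <unistd.h>

/*
 * Hypothetical publisher: push the new head through the region's write
 * path so vfio_pci_dma_fault_response_rw() runs and queues the inject
 * work. `region_offset` is the region's offset within the device fd
 * and `head_offset` stands in for
 * offsetof(struct vfio_region_dma_fault_response, head).
 */
static int publish_head(int device_fd, off_t region_offset,
			off_t head_offset, uint32_t new_head)
{
	ssize_t n = pwrite(device_fd, &new_head, sizeof(new_head),
			   region_offset + head_offset);

	return n == (ssize_t)sizeof(new_head) ? 0 : -1;
}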