提交 e32f0f77 编写于 作者: K Kunkun Jiang 提交者: Zheng Zengkai

Revert "vfio/pci: Inject page response upon response region fill"

virt inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I61SPO
CVE: NA

--------------------------------

This reverts commit 9b4742a6.
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 10fa571a
...@@ -607,32 +607,6 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev) ...@@ -607,32 +607,6 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev)
return ret; return ret;
} }
/*
 * Worker that drains the userspace-filled DMA fault response ring and
 * injects each entry into the IOMMU layer.
 *
 * Scheduled (queue_work) whenever userspace advances the ring's head via
 * the response region write path; runs on the vdev's dedicated
 * dma_fault_response_wq singlethread workqueue.
 */
static void dma_response_inject(struct work_struct *work)
{
struct vfio_pci_dma_fault_response_work *rwork =
container_of(work, struct vfio_pci_dma_fault_response_work, inject);
struct vfio_region_dma_fault_response *header = rwork->header;
struct vfio_pci_device *vdev = rwork->vdev;
struct iommu_page_response *resp;
u32 tail, head, size;
/* Serialize against the region write path that updates header->head. */
mutex_lock(&vdev->fault_response_queue_lock);
tail = header->tail;
head = header->head;
size = header->nb_entries;
/* Consume every response currently queued between tail and head. */
while (CIRC_CNT(head, tail, size) >= 1) {
/* Entries live in the mmap'd pages, past the header at header->offset. */
resp = (struct iommu_page_response *)(vdev->fault_response_pages + header->offset +
tail * header->entry_size);
/* TODO: properly handle the return value */
iommu_page_response(&vdev->pdev->dev, resp);
/* Publish consumption so userspace sees the freed slot. */
header->tail = tail = (tail + 1) % size;
}
mutex_unlock(&vdev->fault_response_queue_lock);
}
#define DMA_FAULT_RESPONSE_RING_LENGTH 512 #define DMA_FAULT_RESPONSE_RING_LENGTH 512
static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev) static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
...@@ -678,22 +652,8 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev) ...@@ -678,22 +652,8 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH; header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH;
header->offset = PAGE_SIZE; header->offset = PAGE_SIZE;
vdev->response_work = kzalloc(sizeof(*vdev->response_work), GFP_KERNEL);
if (!vdev->response_work)
goto out;
vdev->response_work->header = header;
vdev->response_work->vdev = vdev;
/* launch the thread that will extract the response */
INIT_WORK(&vdev->response_work->inject, dma_response_inject);
vdev->dma_fault_response_wq =
create_singlethread_workqueue("vfio-dma-fault-response");
if (!vdev->dma_fault_response_wq)
return -ENOMEM;
return 0; return 0;
out: out:
kfree(vdev->fault_response_pages);
vdev->fault_response_pages = NULL; vdev->fault_response_pages = NULL;
return ret; return ret;
} }
......
...@@ -52,12 +52,6 @@ struct vfio_pci_irq_ctx { ...@@ -52,12 +52,6 @@ struct vfio_pci_irq_ctx {
struct irq_bypass_producer producer; struct irq_bypass_producer producer;
}; };
/*
 * Per-device context for the deferred DMA fault response injection work.
 * Bundles the work item with the response ring header and the owning
 * vfio_pci_device so dma_response_inject() can recover both via
 * container_of() on the work_struct.
 */
struct vfio_pci_dma_fault_response_work {
struct work_struct inject;	/* queued on vdev->dma_fault_response_wq */
struct vfio_region_dma_fault_response *header;	/* ring header in fault_response_pages */
struct vfio_pci_device *vdev;	/* owning device */
};
struct vfio_pci_device; struct vfio_pci_device;
struct vfio_pci_region; struct vfio_pci_region;
...@@ -159,7 +153,6 @@ struct vfio_pci_device { ...@@ -159,7 +153,6 @@ struct vfio_pci_device {
u8 *fault_pages; u8 *fault_pages;
u8 *fault_response_pages; u8 *fault_response_pages;
struct workqueue_struct *dma_fault_response_wq; struct workqueue_struct *dma_fault_response_wq;
struct vfio_pci_dma_fault_response_work *response_work;
struct mutex fault_queue_lock; struct mutex fault_queue_lock;
struct mutex fault_response_queue_lock; struct mutex fault_response_queue_lock;
struct list_head dummy_resources_list; struct list_head dummy_resources_list;
......
...@@ -440,7 +440,6 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user ...@@ -440,7 +440,6 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user
mutex_lock(&vdev->fault_response_queue_lock); mutex_lock(&vdev->fault_response_queue_lock);
header->head = new_head; header->head = new_head;
mutex_unlock(&vdev->fault_response_queue_lock); mutex_unlock(&vdev->fault_response_queue_lock);
queue_work(vdev->dma_fault_response_wq, &vdev->response_work->inject);
} else { } else {
if (copy_to_user(buf, base + pos, count)) if (copy_to_user(buf, base + pos, count))
return -EFAULT; return -EFAULT;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册