Commit 33bce9ca authored by Jean-Philippe Brucker, committed by Xie XiuQi

iommu/sva: Register page fault handler

hulk inclusion
category: feature
bugzilla: 14369
CVE: NA
-------------------

Let users call iommu_sva_device_init() with the IOMMU_SVA_FEAT_IOPF
flag, which enables the I/O Page Fault queue. The IOMMU driver checks
whether the device supports a form of page fault, in which case it adds
the device to a fault queue. If the device doesn't support page faults,
the IOMMU driver aborts iommu_sva_device_init().

The fault queue must be flushed before any io_mm is freed, to make sure
that its PASID isn't referenced by any pending fault and can be
reallocated. Add iopf_queue_flush_dev() calls in a few strategic
locations.
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Fang Lijun <fanglijun3@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 16efc4ff
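
As background for the diff below, a driver would opt in to I/O page
faults roughly as follows. This is a sketch, not part of the patch: the
my_dev_* names are invented, and the four-argument
iommu_sva_device_init() signature is assumed from the rest of this
series.

/*
 * Hypothetical consumer of the new flag (sketch only). The my_dev_*
 * helpers are invented for illustration; iommu_sva_device_init() is
 * assumed to take (dev, features, max_pasid, mm_exit) as elsewhere in
 * this series.
 */
static int my_dev_mm_exit(struct device *dev, int pasid, void *drvdata)
{
	/* Stop the device from issuing transactions tagged with @pasid */
	return 0;
}

static int my_dev_enable_sva(struct device *dev)
{
	/*
	 * With IOMMU_SVA_FEAT_IOPF, init registers iommu_queue_iopf() as
	 * the device fault handler and fails if the device cannot recover
	 * from page faults (e.g. no PCI PRI). max_pasid of 0 keeps the
	 * IOMMU driver's default limit.
	 */
	return iommu_sva_device_init(dev, IOMMU_SVA_FEAT_IOPF, 0,
				     my_dev_mm_exit);
}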
@@ -442,6 +442,8 @@ static void iommu_notifier_release(struct mmu_notifier *mn,
 			dev_WARN(bond->dev, "possible leak of PASID %u",
 				 io_mm->pasid);
 
+		iopf_queue_flush_dev(bond->dev);
+
 		spin_lock(&iommu_sva_lock);
 		next = list_next_entry(bond, mm_head);
@@ -519,6 +521,9 @@ static struct mmu_notifier_ops iommu_mmu_notifier = {
  * description. Setting @max_pasid to a non-zero value smaller than this limit
  * overrides it.
  *
+ * If the device should support recoverable I/O Page Faults (e.g. PCI PRI), the
+ * IOMMU_SVA_FEAT_IOPF feature must be requested.
+ *
  * If the driver intends to share process address spaces, it should pass a valid
  * @mm_exit handler. Otherwise @mm_exit can be NULL. After @mm_exit returns, the
  * device must not issue any more transaction with the PASID given as argument.
@@ -547,12 +552,21 @@ int iommu_sva_device_init(struct device *dev, unsigned long features,
 	if (!domain || !domain->ops->sva_device_init)
 		return -ENODEV;
 
-	if (features)
+	if (features & ~IOMMU_SVA_FEAT_IOPF)
 		return -EINVAL;
 
+	if (features & IOMMU_SVA_FEAT_IOPF) {
+		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf,
+							  dev);
+		if (ret)
+			return ret;
+	}
+
 	param = kzalloc(sizeof(*param), GFP_KERNEL);
-	if (!param)
-		return -ENOMEM;
+	if (!param) {
+		ret = -ENOMEM;
+		goto err_remove_handler;
+	}
 
 	param->features = features;
 	param->max_pasid = max_pasid;
@@ -585,6 +599,9 @@ int iommu_sva_device_init(struct device *dev, unsigned long features,
 err_free_param:
 	kfree(param);
 
+err_remove_handler:
+	iommu_unregister_device_fault_handler(dev);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_sva_device_init);
@@ -594,7 +611,8 @@ EXPORT_SYMBOL_GPL(iommu_sva_device_init);
  * @dev: the device
  *
  * Disable SVA. Device driver should ensure that the device isn't performing any
- * DMA while this function is running.
+ * DMA while this function is running. In addition all faults should have been
+ * flushed to the IOMMU.
  */
 int iommu_sva_device_shutdown(struct device *dev)
 {
@@ -618,6 +636,8 @@ int iommu_sva_device_shutdown(struct device *dev)
 	kfree(param);
 
+	iommu_unregister_device_fault_handler(dev);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_sva_device_shutdown);
@@ -695,6 +715,12 @@ int __iommu_sva_unbind_device(struct device *dev, int pasid)
 	if (!param || WARN_ON(!domain))
 		return -EINVAL;
 
+	/*
+	 * Caller stopped the device from issuing PASIDs, now make sure they are
+	 * out of the fault queue.
+	 */
+	iopf_queue_flush_dev(dev);
+
 	/* spin_lock_irq matches the one in wait_event_lock_irq */
 	spin_lock_irq(&iommu_sva_lock);
 	list_for_each_entry(bond, &param->mm_list, dev_head) {
@@ -722,6 +748,8 @@ void __iommu_sva_unbind_dev_all(struct device *dev)
 	struct iommu_sva_param *param;
 	struct iommu_bond *bond, *next;
 
+	iopf_queue_flush_dev(dev);
+
 	/*
 	 * io_mm_detach_locked might wait, so we shouldn't call it with the dev
 	 * param lock held. It's fine to read sva_param outside the lock because
......
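
The ordering that the flush calls above rely on can be summarized with
a hypothetical teardown path (sketch only; my_dev_stop() is an invented
helper):

/* Hypothetical teardown order implied by the hunks above (sketch) */
static void my_dev_disable_sva(struct device *dev, int pasid)
{
	/* 1. Quiesce the device so it issues no new PASID-tagged DMA */
	my_dev_stop(dev);

	/* 2. Unbind: now also flushes @dev's faults out of the queue,
	 *    so the PASID can safely be reallocated */
	__iommu_sva_unbind_device(dev, pasid);

	/* 3. Shutdown: unregisters the handler added at init time */
	iommu_sva_device_shutdown(dev);
}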
@@ -2365,9 +2365,9 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
  * iommu_sva_device_init() must be called first, to initialize the required SVA
  * features. @flags is a subset of these features.
  *
- * The caller must pin down using get_user_pages*() all mappings shared with the
- * device. mlock() isn't sufficient, as it doesn't prevent minor page faults
- * (e.g. copy-on-write).
+ * If IOMMU_SVA_FEAT_IOPF isn't requested, the caller must pin down using
+ * get_user_pages*() all mappings shared with the device. mlock() isn't
+ * sufficient, as it doesn't prevent minor page faults (e.g. copy-on-write).
  *
  * On success, 0 is returned and @pasid contains a valid ID. Otherwise, an error
  * is returned.
......
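
The relaxed pinning rule documented above changes what a binding driver
must do. A sketch, assuming the bind signature used elsewhere in this
series and an invented my_dev_set_pasid() helper:

/*
 * Sketch only: iommu_sva_bind_device(dev, mm, &pasid, flags, drvdata)
 * is assumed from this series; my_dev_set_pasid() is invented.
 */
static int my_dev_bind_current_mm(struct device *dev)
{
	int pasid, ret;

	ret = iommu_sva_bind_device(dev, current->mm, &pasid,
				    IOMMU_SVA_FEAT_IOPF, NULL);
	if (ret)
		return ret;

	/*
	 * Because IOPF was requested at init and bind time, shared
	 * buffers don't need to be pinned with get_user_pages*();
	 * faults are handled on demand instead.
	 */
	return my_dev_set_pasid(dev, pasid);
}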
@@ -63,6 +63,8 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault_event *, void *);
 typedef int (*iommu_mm_exit_handler_t)(struct device *dev, int pasid, void *);
 
+#define IOMMU_SVA_FEAT_IOPF		(1 << 0)
+
 struct iommu_domain_geometry {
 	dma_addr_t aperture_start; /* First address that can be mapped */
 	dma_addr_t aperture_end;   /* Last address that can be mapped */
......