提交 0204a496 编写于 作者: D David Woodhouse

iommu/vt-d: Add callback to device driver on page faults

Signed-off-by: NDavid Woodhouse <David.Woodhouse@intel.com>
上级 a222a7f0
...@@ -264,7 +264,7 @@ static const struct mmu_notifier_ops intel_mmuops = { ...@@ -264,7 +264,7 @@ static const struct mmu_notifier_ops intel_mmuops = {
static DEFINE_MUTEX(pasid_mutex); static DEFINE_MUTEX(pasid_mutex);
int intel_svm_bind_mm(struct device *dev, int *pasid) int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{ {
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct intel_svm_dev *sdev; struct intel_svm_dev *sdev;
...@@ -302,6 +302,10 @@ int intel_svm_bind_mm(struct device *dev, int *pasid) ...@@ -302,6 +302,10 @@ int intel_svm_bind_mm(struct device *dev, int *pasid)
list_for_each_entry(sdev, &svm->devs, list) { list_for_each_entry(sdev, &svm->devs, list) {
if (dev == sdev->dev) { if (dev == sdev->dev) {
if (sdev->ops != ops) {
ret = -EBUSY;
goto out;
}
sdev->users++; sdev->users++;
goto success; goto success;
} }
...@@ -327,6 +331,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid) ...@@ -327,6 +331,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid)
} }
/* Finish the setup now we know we're keeping it */ /* Finish the setup now we know we're keeping it */
sdev->users = 1; sdev->users = 1;
sdev->ops = ops;
init_rcu_head(&sdev->rcu); init_rcu_head(&sdev->rcu);
if (!svm) { if (!svm) {
...@@ -456,6 +461,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) ...@@ -456,6 +461,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) { while (head != tail) {
struct intel_svm_dev *sdev;
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct page_req_dsc *req; struct page_req_dsc *req;
struct qi_desc resp; struct qi_desc resp;
...@@ -507,6 +513,24 @@ static irqreturn_t prq_event_thread(int irq, void *d) ...@@ -507,6 +513,24 @@ static irqreturn_t prq_event_thread(int irq, void *d)
up_read(&svm->mm->mmap_sem); up_read(&svm->mm->mmap_sem);
bad_req: bad_req:
/* Accounting for major/minor faults? */ /* Accounting for major/minor faults? */
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
break;
}
/* Other devices can go away, but the drivers are not permitted
* to unbind while any page faults might be in flight. So it's
* OK to drop the 'lock' here now we have it. */
rcu_read_unlock();
if (WARN_ON(&sdev->list == &svm->devs))
sdev = NULL;
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
(req->exe_req << 1) | (req->priv_req);
sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
}
if (req->lpig) { if (req->lpig) {
/* Page Group Response */ /* Page Group Response */
......
...@@ -472,10 +472,13 @@ extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); ...@@ -472,10 +472,13 @@ extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu);
struct svm_dev_ops;
struct intel_svm_dev { struct intel_svm_dev {
struct list_head list; struct list_head list;
struct rcu_head rcu; struct rcu_head rcu;
struct device *dev; struct device *dev;
struct svm_dev_ops *ops;
int users; int users;
u16 did; u16 did;
u16 dev_iotlb:1; u16 dev_iotlb:1;
......
...@@ -20,10 +20,23 @@ ...@@ -20,10 +20,23 @@
struct device; struct device;
struct svm_dev_ops {
void (*fault_cb)(struct device *dev, int pasid, u64 address,
u32 private, int rwxp, int response);
};
/* Values for rwxp in fault_cb callback */
#define SVM_REQ_READ (1<<3)
#define SVM_REQ_WRITE (1<<2)
#define SVM_REQ_EXEC (1<<1)
#define SVM_REQ_PRIV (1<<0)
/** /**
* intel_svm_bind_mm() - Bind the current process to a PASID * intel_svm_bind_mm() - Bind the current process to a PASID
* @dev: Device to be granted access * @dev: Device to be granted access
* @pasid: Address for allocated PASID * @pasid: Address for allocated PASID
* @flags: Flags. Later for requesting supervisor mode, etc.
* @ops: Callbacks to device driver
* *
* This function attempts to enable PASID support for the given device. * This function attempts to enable PASID support for the given device.
* If the @pasid argument is non-%NULL, a PASID is allocated for access * If the @pasid argument is non-%NULL, a PASID is allocated for access
...@@ -45,7 +58,8 @@ struct device; ...@@ -45,7 +58,8 @@ struct device;
* Multiple calls from the same process may result in the same PASID * Multiple calls from the same process may result in the same PASID
* being re-used. A reference count is kept. * being re-used. A reference count is kept.
*/ */
extern int intel_svm_bind_mm(struct device *dev, int *pasid); extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
struct svm_dev_ops *ops);
/** /**
* intel_svm_unbind_mm() - Unbind a specified PASID * intel_svm_unbind_mm() - Unbind a specified PASID
...@@ -66,7 +80,8 @@ extern int intel_svm_unbind_mm(struct device *dev, int pasid); ...@@ -66,7 +80,8 @@ extern int intel_svm_unbind_mm(struct device *dev, int pasid);
#else /* CONFIG_INTEL_IOMMU_SVM */ #else /* CONFIG_INTEL_IOMMU_SVM */
static inline int intel_svm_bind_mm(struct device *dev, int *pasid) static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
int flags, struct svm_dev_ops *ops)
{ {
return -ENOSYS; return -ENOSYS;
} }
...@@ -77,6 +92,6 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid) ...@@ -77,6 +92,6 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
} }
#endif /* CONFIG_INTEL_IOMMU_SVM */ #endif /* CONFIG_INTEL_IOMMU_SVM */
#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL)) #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
#endif /* __INTEL_SVM_H__ */ #endif /* __INTEL_SVM_H__ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册