diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 7e465dde9d3c645f5e8b02a99f885e497205e694..ed8d987551c6cb3ac158ff2ec42cfb1bb1ebbc94 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1578,6 +1578,51 @@ int iommu_sva_invalidate(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_sva_invalidate);
 
+int iommu_page_response(struct device *dev,
+			struct page_response_msg *msg)
+{
+	struct iommu_param *param = dev->iommu_param;
+	int ret = -EINVAL;
+	struct iommu_fault_event *evt, *iter;
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+	if (!domain || !domain->ops->page_response)
+		return -ENODEV;
+
+	/*
+	 * Device iommu_param should have been allocated when the device was
+	 * added to its iommu_group.
+	 */
+	if (!param || !param->fault_param)
+		return -EINVAL;
+
+	/* Only send a response if there is a fault report pending */
+	mutex_lock(&param->fault_param->lock);
+	if (list_empty(&param->fault_param->faults)) {
+		pr_warn("no pending PRQ, drop response\n");
+		goto done_unlock;
+	}
+	/*
+	 * Check if we have a matching page request pending to respond to,
+	 * otherwise return -EINVAL.
+	 */
+	list_for_each_entry_safe(evt, iter, &param->fault_param->faults, list) {
+		if (evt->pasid == msg->pasid &&
+		    msg->page_req_group_id == evt->page_req_group_id) {
+			msg->private_data = evt->iommu_private;
+			ret = domain->ops->page_response(dev, msg);
+			list_del(&evt->list);
+			kfree(evt);
+			break;
+		}
+	}
+
+done_unlock:
+	mutex_unlock(&param->fault_param->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
+
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7a25981570ccbbe09ad739d15e2868f930673bd1..3fa45d2928131f12604a9680acd002476eaebec4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -163,6 +163,55 @@ struct iommu_resv_region {
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * enum page_response_code - Return status of fault handlers, telling the IOMMU
+ * driver how to proceed with the fault.
+ *
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ *	populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ *	access. This is "Invalid Request" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ *	this device if possible. This is "Response Failure" in PCI PRI.
+ */
+enum page_response_code {
+	IOMMU_PAGE_RESP_SUCCESS = 0,
+	IOMMU_PAGE_RESP_INVALID,
+	IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * enum page_request_handle_t - Return page request/response handler status
+ *
+ * @IOMMU_PAGE_RESP_HANDLED: Stop processing the fault, and do not send a
+ *	reply to the device.
+ * @IOMMU_PAGE_RESP_CONTINUE: Fault was not handled. Call the next handler,
+ *	or terminate.
+ */
+enum page_request_handle_t {
+	IOMMU_PAGE_RESP_HANDLED = 0,
+	IOMMU_PAGE_RESP_CONTINUE,
+};
+
+/**
+ * struct page_response_msg - Generic page response information based on PCI ATS and PASID spec
+ * @addr: servicing page address
+ * @pasid: contains process address space ID
+ * @resp_code: response code
+ * @pasid_present: the @pasid field is valid
+ * @page_req_group_id: page request group index
+ * @private_data: uniquely identifies device-specific private data for an
+ *	individual page response
+ */
+struct page_response_msg {
+	u64 addr;
+	u32 pasid;
+	enum page_response_code resp_code;
+	u32 pasid_present:1;
+	u32 page_req_group_id;
+	u64 private_data;
+};
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
@@ -194,6 +243,7 @@ struct iommu_resv_region {
  * @bind_pasid_table: bind pasid table pointer for guest SVM
  * @unbind_pasid_table: unbind pasid table pointer and restore defaults
  * @sva_invalidate: invalidate translation caches of shared virtual address
+ * @page_response: handle page request response
  */
 struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
@@ -247,6 +297,7 @@ struct iommu_ops {
 			       struct device *dev);
 	int (*sva_invalidate)(struct iommu_domain *domain,
 		struct device *dev, struct tlb_invalidate_info *inv_info);
+	int (*page_response)(struct device *dev, struct page_response_msg *msg);
 	unsigned long pgsize_bitmap;
 };
 
@@ -469,6 +520,8 @@ extern int iommu_unregister_device_fault_handler(struct device *dev);
 
 extern int iommu_report_device_fault(struct device *dev,
 				     struct iommu_fault_event *evt);
+extern int iommu_page_response(struct device *dev,
+			       struct page_response_msg *msg);
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
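
For reference, the intended flow pairs iommu_report_device_fault() with iommu_page_response(): the IOMMU driver queues one iommu_fault_event per page request, a device-specific fault handler services the faulting address, and the handler replies with a page_response_msg carrying the same PASID and page request group ID so the pending event can be matched against the fault list, handed to the vendor page_response op and freed. The sketch below illustrates such a handler. It is an illustrative example, not part of this patch: the handler signature, the iommu_register_device_fault_handler() helper and the evt->addr field are assumed from the companion fault-reporting patches and may differ there.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical consumer of iommu_page_response().  Only
 * struct page_response_msg and iommu_page_response() come from this
 * patch; the event fields other than pasid/page_req_group_id, the
 * handler signature and the registration helper are assumptions.
 */
static int my_dev_prq_handler(struct iommu_fault_event *evt, void *data)
{
	struct device *dev = data;
	struct page_response_msg resp = {
		.addr			= evt->addr,	/* assumed event field */
		.pasid			= evt->pasid,
		.pasid_present		= 1,
		.page_req_group_id	= evt->page_req_group_id,
		/* Servicing of the faulting address (e.g. faulting in the page) elided */
		.resp_code		= IOMMU_PAGE_RESP_SUCCESS,
	};

	/* Matches the pending event by PASID and group ID, then frees it */
	return iommu_page_response(dev, &resp);
}

static int my_dev_enable_prq(struct device *dev)
{
	/* Registration helper assumed from the fault-reporting series */
	return iommu_register_device_fault_handler(dev, my_dev_prq_handler, dev);
}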