Commit eb4c8a49 authored by Jacob Pan, committed by Xie XiuQi

iommu: handle page response timeout

hulk inclusion
category: feature
bugzilla: 14369
CVE: NA
-------------------

When IO page faults are reported outside the IOMMU subsystem, the page
request handler may fail for various reasons, e.g. a guest received
page requests but did not get a chance to run for a long time. Such
unresponsive behavior can tie up the limited queue resources of the
faulting device.

There can be hardware or credit-based software solutions, as suggested
in PCI ATS Chapter 4. To provide a basic safety net, this patch
introduces a per-device deferrable timer which monitors the longest
pending page fault that requires a response. Proper action, such as
sending a failure response code, could be taken when the timer
expires, but is not included in this patch; the life cycle of the page
request group ID must be considered first, to avoid confusion with a
group ID reused by the device. For now, a warning message provides a
clue to such failures.
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Fang Lijun <fanglijun3@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent a75dedeb
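For context, here is a minimal, hypothetical sketch of the consumer side of this interface: a driver registers a per-device fault handler and later completes recoverable page requests with iommu_page_response(). It is not part of the patch; the struct my_drv_prq bookkeeping is invented for illustration, and the iommu_fault_event/page_response_msg field names (type, pasid, page_req_group_id, resp_code) and the IOMMU_PAGE_RESP_SUCCESS code follow the upstream RFC series this backport tracks and may differ in this tree.

/* Hypothetical consumer of the fault reporting / page response API
 * (sketch only, not part of this patch). Field and constant names
 * follow the upstream RFC series and may differ in this tree.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_drv_prq {			/* invented driver bookkeeping */
	struct device *dev;
	int pasid;
	int grpid;
	struct work_struct work;
};

static void my_drv_prq_work(struct work_struct *work)
{
	struct my_drv_prq *prq = container_of(work, struct my_drv_prq, work);
	struct page_response_msg msg = {
		.pasid			= prq->pasid,
		.page_req_group_id	= prq->grpid,
		.resp_code		= IOMMU_PAGE_RESP_SUCCESS,
	};

	/* Must answer before IOMMU_PAGE_RESPONSE_MAXTIME elapses, or the
	 * new per-device timer will log the request as overdue.
	 */
	iommu_page_response(prq->dev, &msg);
	kfree(prq);
}

static int my_drv_fault_handler(struct iommu_fault_event *evt, void *data)
{
	struct my_drv_prq *prq;

	if (evt->type != IOMMU_FAULT_PAGE_REQ)
		return 0;		/* unrecoverable faults: report only */

	prq = kzalloc(sizeof(*prq), GFP_ATOMIC);
	if (!prq)
		return -ENOMEM;

	prq->dev = data;		/* dev passed as handler private data */
	prq->pasid = evt->pasid;
	prq->grpid = evt->page_req_group_id;
	INIT_WORK(&prq->work, my_drv_prq_work);
	schedule_work(&prq->work);
	return 0;
}

/* At probe time:
 *	iommu_register_device_fault_handler(dev, my_drv_fault_handler, dev);
 */

The window the new timer watches is exactly this deferral: if my_drv_prq_work() (or a guest behind vfio) does not answer within IOMMU_PAGE_RESPONSE_MAXTIME, the deferrable timer below logs the overdue pasid and group ID.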
@@ -867,6 +867,41 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/* Max time to wait for a pending page request */
#define IOMMU_PAGE_RESPONSE_MAXTIME (HZ * 10)
static void iommu_dev_fault_timer_fn(struct timer_list *t)
{
	struct iommu_fault_param *fparam = from_timer(fparam, t, timer);
	struct iommu_fault_event *evt, *iter;
	u64 now;

	now = get_jiffies_64();

	/* The goal is to ensure that the driver or guest page fault handler
	 * (via vfio) sends a page response on time. Otherwise, limited queue
	 * resources may be occupied by unresponsive guests or drivers.
	 * When the per-device pending fault list is not empty, we periodically
	 * check whether any anticipated page response time has expired.
	 *
	 * TODO:
	 * We could do the following if the response time expires:
	 * 1. send page response code FAILURE to all pending PRQ
	 * 2. inform device driver or vfio
	 * 3. drain in-flight page requests and responses for this device
	 * 4. clear pending fault list such that driver can unregister fault
	 *    handler (otherwise blocked when pending faults are present).
	 */
	list_for_each_entry_safe(evt, iter, &fparam->faults, list) {
		if (time_after64(now, evt->expire))
			pr_err("Page response time expired! pasid %d gid %d exp %llu now %llu\n",
			       evt->pasid, evt->page_req_group_id,
			       evt->expire, now);
	}
	mod_timer(t, now + IOMMU_PAGE_RESPONSE_MAXTIME);
}
/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
@@ -874,8 +909,8 @@ EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, call this handler with the fault event
-* and data as argument. The handler should return 0. If the fault is
-* recoverable (IOMMU_FAULT_PAGE_REQ), the handler must also complete
+* and data as argument. The handler should return 0 on success. If the fault is
+* recoverable (IOMMU_FAULT_PAGE_REQ), the handler can also complete
 * the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
@@ -916,6 +951,9 @@ int iommu_register_device_fault_handler(struct device *dev,
	param->fault_param->data = data;
	INIT_LIST_HEAD(&param->fault_param->faults);

	timer_setup(&param->fault_param->timer, iommu_dev_fault_timer_fn,
		    TIMER_DEFERRABLE);

	mutex_unlock(&param->lock);

	return 0;
@@ -973,6 +1011,8 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	int ret = 0;
	struct iommu_fault_event *evt_pending;
	struct timer_list *tmr;
	u64 exp;
	struct iommu_fault_param *fparam;

	/* iommu_param is allocated when device is added to group */
@@ -993,6 +1033,17 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
			goto done_unlock;
		}
		memcpy(evt_pending, evt, sizeof(struct iommu_fault_event));
		/* Keep track of response expiration time */
		exp = get_jiffies_64() + IOMMU_PAGE_RESPONSE_MAXTIME;
		evt_pending->expire = exp;

		if (list_empty(&fparam->faults)) {
			/* First pending event, start timer */
			tmr = &dev->iommu_param->fault_param->timer;
			WARN_ON(timer_pending(tmr));
			mod_timer(tmr, exp);
		}

		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
@@ -1617,6 +1668,13 @@ int iommu_page_response(struct device *dev,
		}
	}

	/* stop response timer if no more pending request */
	if (list_empty(&param->fault_param->faults) &&
	    timer_pending(&param->fault_param->timer)) {
		pr_debug("no pending PRQ, stop timer\n");
		del_timer(&param->fault_param->timer);
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
...
@@ -372,6 +372,7 @@ enum iommu_fault_reason {
 * @iommu_private: used by the IOMMU driver for storing fault-specific
 *		   data. Users should not modify this field before
 *		   sending the fault response.
 * @expire: time limit in jiffies to wait for the page response
 */
struct iommu_fault_event {
	struct list_head list;
@@ -385,6 +386,7 @@ struct iommu_fault_event {
	u32 prot;
	u64 device_private;
	u64 iommu_private;
	u64 expire;
};

/**
@@ -392,11 +394,13 @@ struct iommu_fault_event {
 * @dev_fault_handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which needs response, e.g. page response.
 * @timer: track page request pending time limit
 * @lock: protect pending PRQ event list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	struct list_head faults;
	struct timer_list timer;
	struct mutex lock;
	void *data;
};
...
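The TODO inside iommu_dev_fault_timer_fn() hints at the eventual expiry action. A minimal sketch of its first step (answer every overdue request with a failure code) might look as follows. It is not part of the patch: it assumes an IOMMU_PAGE_RESP_FAILURE code and the page_response_msg fields from the upstream RFC series, assumes iommu_page_response() removes the completed event from the pending list as the normal completion path does, and it would have to run in process context (e.g. from a workqueue kicked by the timer), since iommu_page_response() takes the per-device mutex.

/* Hypothetical expiry action for the TODO above (sketch only, not in
 * this patch). Assumes IOMMU_PAGE_RESP_FAILURE and the page_response_msg
 * fields from the upstream RFC series; names may differ in this tree.
 */
static void iommu_dev_fault_expire(struct device *dev,
				   struct iommu_fault_param *fparam)
{
	struct iommu_fault_event *evt, *iter;
	u64 now = get_jiffies_64();

	list_for_each_entry_safe(evt, iter, &fparam->faults, list) {
		struct page_response_msg msg = {
			.pasid			= evt->pasid,
			.page_req_group_id	= evt->page_req_group_id,
			.resp_code		= IOMMU_PAGE_RESP_FAILURE,
		};

		if (!time_after64(now, evt->expire))
			continue;

		/* Assumed to drop the matching event from fparam->faults
		 * and to stop the timer once the list becomes empty, as in
		 * the normal completion path.
		 */
		iommu_page_response(dev, &msg);
	}
}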