Commit c97e6be2 authored by Jean-Philippe Brucker, committed by Zheng Zengkai

iommu/arm-smmu-v3: Add support for PRI

maillist inclusion
category: feature
bugzilla: 51855
CVE: NA

Reference: https://jpbrucker.net/git/linux/commit/?h=sva/2021-03-01&id=b6805ab77b4ca7a844ce3b1c4584133e81e3f071

---------------------------------------------

For PCI devices that support it, enable the PRI capability and handle
PRI Page Requests with the generic fault handler. PRI is enabled when the
device driver enables IOMMU_DEV_FEAT_SVA.
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Lijun Fang <fanglijun3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 52060f07
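Before the diff, some context on how the feature is consumed. A device driver opts in by enabling IOMMU_DEV_FEAT_IOPF (which, on a PRI-capable device, now routes page requests through the PRI queue) and then IOMMU_DEV_FEAT_SVA, before binding a process address space. The sketch below is illustrative only, written against the generic IOMMU API of this kernel generation; the function name and error handling are hypothetical, not part of this patch.

```c
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/pci.h>

/* Hypothetical driver-side sequence, for illustration only */
static int example_bind_process(struct pci_dev *pdev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	int ret;

	/* IOPF first: on PRI-capable devices this ends up enabling PRI */
	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		goto err_disable_iopf;

	handle = iommu_sva_bind_device(&pdev->dev, mm, NULL);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto err_disable_sva;
	}

	/* Program iommu_sva_get_pasid(handle) into the device here */
	return 0;

err_disable_sva:
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
err_disable_iopf:
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
	return ret;
}
```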
@@ -372,6 +372,19 @@ arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
struct arm_smmu_bond *bond = sva_to_bond(handle);
struct arm_smmu_master *master = dev_iommu_priv_get(handle->dev);
/*
* For stall, the event queue does not need to be flushed since the
* device driver ensured all transactions are complete. For PRI, however,
* although the device driver has stopped all DMA for this PASID, it may
* have left Page Requests in flight (if using the Stop Marker Message
* to stop PASID). Complete them.
*/
if (master->pri_supported) {
arm_smmu_flush_priq(master->smmu);
iopf_queue_flush_dev(handle->dev);
}
mutex_lock(&sva_lock);
if (refcount_dec_and_test(&bond->refs)) {
@@ -441,7 +454,7 @@ bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
if (master->num_streams != 1)
return false;
return master->stall_enabled;
return master->stall_enabled || master->pri_supported;
}
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
@@ -466,6 +479,7 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
int ret;
struct iopf_queue *iopfq;
struct device *dev = master->dev;
/*
@@ -478,16 +492,21 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
if (master->stall_enabled)
iopfq = master->smmu->evtq.iopf;
else if (master->pri_supported)
iopfq = master->smmu->priq.iopf;
else
return -EINVAL;
ret = iopf_queue_add_device(iopfq, dev);
if (ret)
return ret;
ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
if (ret) {
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
return ret;
}
return 0;
if (ret)
iopf_queue_remove_device(iopfq, dev);
return ret;
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -498,6 +517,7 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
return;
iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->priq.iopf, dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
@@ -529,6 +549,28 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
return 0;
}
int arm_smmu_master_enable_iopf(struct arm_smmu_master *master)
{
int ret;
if (master->pri_supported) {
ret = arm_smmu_enable_pri(master);
if (ret)
return ret;
}
master->iopf_enabled = true;
return 0;
}
int arm_smmu_master_disable_iopf(struct arm_smmu_master *master)
{
if (master->sva_enabled)
return -EBUSY;
arm_smmu_disable_pri(master);
master->iopf_enabled = false;
return 0;
}
void arm_smmu_sva_notifier_synchronize(void)
{
/*
......
@@ -306,14 +306,6 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
switch (ent->pri.resp) {
case PRI_RESP_DENY:
case PRI_RESP_FAIL:
case PRI_RESP_SUCC:
break;
default:
return -EINVAL;
}
cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
break;
case CMDQ_OP_RESUME:
@@ -885,11 +877,12 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
}
static int arm_smmu_page_response(struct device *dev,
struct iommu_fault_event *unused,
struct iommu_fault_event *evt,
struct iommu_page_response *resp)
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
bool pasid_valid = resp->flags & IOMMU_PAGE_RESP_PASID_VALID;
int sid = master->streams[0].id;
if (master->stall_enabled) {
@@ -907,6 +900,28 @@ static int arm_smmu_page_response(struct device *dev,
default:
return -EINVAL;
}
} else if (master->pri_supported) {
bool needs_pasid = (evt->fault.prm.flags &
IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID);
cmd.opcode = CMDQ_OP_PRI_RESP;
cmd.substream_valid = needs_pasid && pasid_valid;
cmd.pri.sid = sid;
cmd.pri.ssid = resp->pasid;
cmd.pri.grpid = resp->grpid;
switch (resp->code) {
case IOMMU_PAGE_RESP_FAILURE:
cmd.pri.resp = CMDQ_PRI_1_RESP_FAILURE;
break;
case IOMMU_PAGE_RESP_INVALID:
cmd.pri.resp = CMDQ_PRI_1_RESP_INVALID;
break;
case IOMMU_PAGE_RESP_SUCCESS:
cmd.pri.resp = CMDQ_PRI_1_RESP_SUCCESS;
break;
default:
return -EINVAL;
}
} else {
return -ENODEV;
}
@@ -1319,6 +1334,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
FIELD_PREP(STRTAB_STE_1_STRW, strw));
if (master->prg_resp_needs_ssid)
dst[1] |= cpu_to_le64(STRTAB_STE_1_PPAR);
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1553,64 +1571,160 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
u32 sid, ssid;
u16 grpid;
bool ssv, last;
sid = FIELD_GET(PRIQ_0_SID, evt[0]);
ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
dev_info(smmu->dev, "unexpected PRI request received:\n");
dev_info(smmu->dev,
"\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
sid, ssid, grpid, last ? "L" : "",
evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
evt[0] & PRIQ_0_PERM_READ ? "R" : "",
evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
evt[1] & PRIQ_1_ADDR_MASK);
if (last) {
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_PRI_RESP,
.substream_valid = ssv,
.pri = {
.sid = sid,
.ssid = ssid,
.grpid = grpid,
.resp = PRI_RESP_DENY,
},
bool pasid_valid, last;
struct arm_smmu_master *master;
u32 sid = FIELD_GET(PRIQ_0_SID, evt[0]);
struct iommu_fault_event fault_evt = {
.fault.type = IOMMU_FAULT_PAGE_REQ,
.fault.prm = {
.grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]),
.addr = evt[1] & PRIQ_1_ADDR_MASK,
},
};
struct iommu_fault_page_request *pr = &fault_evt.fault.prm;
pasid_valid = evt[0] & PRIQ_0_SSID_V;
last = evt[0] & PRIQ_0_PRG_LAST;
/* Discard Stop PASID marker, it isn't used */
if (!(evt[0] & (PRIQ_0_PERM_READ | PRIQ_0_PERM_WRITE)) && last)
return;
if (last)
pr->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
if (pasid_valid) {
pr->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
pr->pasid = FIELD_GET(PRIQ_0_SSID, evt[0]);
}
if (evt[0] & PRIQ_0_PERM_READ)
pr->perm |= IOMMU_FAULT_PERM_READ;
if (evt[0] & PRIQ_0_PERM_WRITE)
pr->perm |= IOMMU_FAULT_PERM_WRITE;
if (evt[0] & PRIQ_0_PERM_EXEC)
pr->perm |= IOMMU_FAULT_PERM_EXEC;
if (evt[0] & PRIQ_0_PERM_PRIV)
pr->perm |= IOMMU_FAULT_PERM_PRIV;
mutex_lock(&smmu->streams_mutex);
master = arm_smmu_find_master(smmu, sid);
if (!master) {
dev_warn(smmu->dev, "Unexpected PPR from unknown SID 0x%x\n", sid);
mutex_unlock(&smmu->streams_mutex);
return;
}
if (pasid_valid && master->prg_resp_needs_ssid)
pr->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
if (iommu_report_device_fault(master->dev, &fault_evt)) {
/*
* No handler registered, so subsequent faults won't produce
* better results. Try to disable PRI.
*/
struct iommu_page_response resp = {
.flags = pasid_valid ?
IOMMU_PAGE_RESP_PASID_VALID : 0,
.pasid = pr->pasid,
.grpid = pr->grpid,
.code = IOMMU_PAGE_RESP_FAILURE,
};
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
dev_warn(master->dev,
"PPR 0x%x:0x%llx 0x%x: nobody cared, disabling PRI\n",
pasid_valid ? pr->pasid : 0, pr->addr, pr->perm);
if (last)
arm_smmu_page_response(master->dev, &fault_evt, &resp);
}
mutex_unlock(&smmu->streams_mutex);
}
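Once iommu_report_device_fault() succeeds above, the request is queued for the generic I/O page fault layer, which resolves it against the bound address space and replies through arm_smmu_page_response(). The sketch below shows that resolution step in simplified form; it is modelled on drivers/iommu/io-pgfault.c of this kernel era, not copied from it, and omits the access checks the real code performs.

```c
#include <linux/iommu.h>
#include <linux/mm.h>

/* Simplified model of the generic fault handler's per-request work */
static enum iommu_page_response_code
example_handle_iopf(struct iommu_fault_page_request *prm, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_USER | FAULT_FLAG_REMOTE;
	enum iommu_page_response_code code = IOMMU_PAGE_RESP_INVALID;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	mmap_read_lock(mm);
	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		goto out_unlock;	/* Address not mapped in this mm */

	/* Fault the page in on behalf of the device */
	fault = handle_mm_fault(vma, prm->addr & PAGE_MASK, flags, NULL);
	if (!(fault & VM_FAULT_ERROR))
		code = IOMMU_PAGE_RESP_SUCCESS;
out_unlock:
	mmap_read_unlock(mm);
	return code;
}
```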
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
int num_handled = 0;
bool overflow = false;
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->priq.q;
struct arm_smmu_priq *priq = &smmu->priq;
struct arm_smmu_queue *q = &priq->q;
struct arm_smmu_ll_queue *llq = &q->llq;
size_t queue_size = 1 << llq->max_n_shift;
u64 evt[PRIQ_ENT_DWORDS];
spin_lock(&priq->wq.lock);
do {
while (!queue_remove_raw(q, evt))
while (!queue_remove_raw(q, evt)) {
spin_unlock(&priq->wq.lock);
arm_smmu_handle_ppr(smmu, evt);
spin_lock(&priq->wq.lock);
if (++num_handled == queue_size) {
priq->batch++;
wake_up_all_locked(&priq->wq);
num_handled = 0;
}
}
if (queue_sync_prod_in(q) == -EOVERFLOW)
if (queue_sync_prod_in(q) == -EOVERFLOW) {
dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
overflow = true;
}
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
Q_IDX(llq, llq->cons);
queue_sync_cons_out(q);
wake_up_all_locked(&priq->wq);
spin_unlock(&priq->wq.lock);
/*
* On overflow, the SMMU might have discarded the last PPR in a group.
* There is no way to know more about it, so we have to discard all
* partial faults already queued.
*/
if (overflow)
iopf_queue_discard_partial(priq->iopf);
return IRQ_HANDLED;
}
/*
* arm_smmu_flush_priq - wait until all events currently in the queue have been
* consumed.
*
* When unbinding a PASID, ensure there aren't any pending page requests for
* that PASID in the queue.
*
* Wait either until the queue becomes empty or, if new events are continually
* added to the queue, until the PRI queue thread has handled a full batch
* (where one batch corresponds to the queue size). For that we take the batch
* number when entering flush() and wait for the PRI queue thread to increment
* it twice. Note that we don't handle overflows on priq->batch. If one occurs, just
* wait for the queue to become empty.
*/
int arm_smmu_flush_priq(struct arm_smmu_device *smmu)
{
int ret;
u64 batch;
bool overflow = false;
struct arm_smmu_priq *priq = &smmu->priq;
struct arm_smmu_queue *q = &priq->q;
spin_lock(&priq->wq.lock);
if (queue_sync_prod_in(q) == -EOVERFLOW) {
dev_err(smmu->dev, "priq overflow detected -- requests lost\n");
overflow = true;
}
batch = priq->batch;
ret = wait_event_interruptible_locked(priq->wq, queue_empty(&q->llq) ||
priq->batch >= batch + 2);
spin_unlock(&priq->wq.lock);
if (overflow)
iopf_queue_discard_partial(priq->iopf);
return ret;
}
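One way to read the "increment it twice" requirement: when flush() samples priq->batch == B, the PRI queue thread may already be partway through batch B, so the increment to B+1 can be dominated by entries dequeued before the flush began. Only once the counter reaches B+2 has a full queue-size batch been handled strictly after the sample, which covers every request that could have been pending at flush() time.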
static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
@@ -2344,6 +2458,73 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
pci_disable_pasid(pdev);
}
static int arm_smmu_init_pri(struct arm_smmu_master *master)
{
struct pci_dev *pdev;
if (!dev_is_pci(master->dev))
return -EINVAL;
if (!(master->smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
pdev = to_pci_dev(master->dev);
if (!pci_pri_supported(pdev))
return 0;
/* If the device supports PASID and PRI, set STE.PPAR */
if (master->ssid_bits)
master->prg_resp_needs_ssid = pci_prg_resp_pasid_required(pdev);
master->pri_supported = true;
return 0;
}
int arm_smmu_enable_pri(struct arm_smmu_master *master)
{
int ret;
struct pci_dev *pdev;
/*
* TODO: find a good inflight PPR number. According to the SMMU spec we
* should divide the PRI queue by the number of PRI-capable devices, but
* it's impossible to know about future (probed late or hotplugged)
* devices. So we might miss some PPRs due to queue overflow.
*/
size_t max_inflight_pprs = 16;
if (!master->pri_supported || !master->ats_enabled)
return -ENODEV;
pdev = to_pci_dev(master->dev);
ret = pci_reset_pri(pdev);
if (ret)
return ret;
ret = pci_enable_pri(pdev, max_inflight_pprs);
if (ret) {
dev_err(master->dev, "cannot enable PRI: %d\n", ret);
return ret;
}
return 0;
}
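The TODO above leaves the inflight-PPR budget open. One heuristic it hints at would divide the PRI queue capacity among the PRI-capable masters known at enable time; the helper below is hypothetical and purely illustrative (the patch settles for a fixed 16, since devices probed late or hotplugged would invalidate any such estimate).

```c
/*
 * Hypothetical sizing helper for the TODO in arm_smmu_enable_pri():
 * split the hardware PRI queue capacity among the PRI-capable masters
 * known so far, never returning less than one slot.
 */
static size_t example_max_inflight_pprs(struct arm_smmu_device *smmu,
					unsigned int num_pri_masters)
{
	/* Number of entries the hardware PRI queue can hold */
	size_t priq_size = 1 << smmu->priq.q.llq.max_n_shift;

	return max_t(size_t, 1, priq_size / max(num_pri_masters, 1U));
}
```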
void arm_smmu_disable_pri(struct arm_smmu_master *master)
{
struct pci_dev *pdev;
if (!dev_is_pci(master->dev))
return;
pdev = to_pci_dev(master->dev);
if (!pdev->pri_enabled)
return;
pci_disable_pri(pdev);
}
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
unsigned long flags;
@@ -2658,6 +2839,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
master->stall_enabled = true;
arm_smmu_init_pri(master);
return &smmu->iommu;
err_free_master:
@@ -2675,8 +2858,10 @@ static void arm_smmu_release_device(struct device *dev)
return;
master = dev_iommu_priv_get(dev);
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
if (WARN_ON(arm_smmu_master_sva_enabled(master))) {
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
iopf_queue_remove_device(master->smmu->priq.iopf, dev);
}
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
@@ -2844,8 +3029,7 @@ static int arm_smmu_dev_enable_feature(struct device *dev,
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
master->iopf_enabled = true;
return 0;
return arm_smmu_master_enable_iopf(master);
case IOMMU_DEV_FEAT_SVA:
return arm_smmu_master_enable_sva(master);
default:
@@ -2863,10 +3047,7 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
return arm_smmu_master_disable_iopf(master);
case IOMMU_DEV_FEAT_SVA:
return arm_smmu_master_disable_sva(master);
default:
@@ -3008,6 +3189,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
if (smmu->features & ARM_SMMU_FEAT_SVA) {
smmu->priq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
if (!smmu->priq.iopf)
return -ENOMEM;
}
init_waitqueue_head(&smmu->priq.wq);
smmu->priq.batch = 0;
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
PRIQ_ENT_DWORDS, "priq");
@@ -3920,6 +4110,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
iopf_queue_free(smmu->evtq.iopf);
iopf_queue_free(smmu->priq.iopf);
return 0;
}
......
@@ -226,6 +226,7 @@
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
#define STRTAB_STE_1_PPAR (1UL << 18)
#define STRTAB_STE_1_S1STALLD (1UL << 27)
#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
@@ -353,6 +354,9 @@
#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
#define CMDQ_PRI_1_RESP_FAILURE 0UL
#define CMDQ_PRI_1_RESP_INVALID 1UL
#define CMDQ_PRI_1_RESP_SUCCESS 2UL
#define CMDQ_RESUME_0_RESP_TERM 0UL
#define CMDQ_RESUME_0_RESP_RETRY 1UL
@@ -420,12 +424,6 @@
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
enum pri_resp {
PRI_RESP_DENY = 0,
PRI_RESP_FAIL = 1,
PRI_RESP_SUCC = 2,
};
struct arm_smmu_cmdq_ent {
/* Common fields */
u8 opcode;
@@ -487,7 +485,7 @@ struct arm_smmu_cmdq_ent {
u32 sid;
u32 ssid;
u16 grpid;
enum pri_resp resp;
u8 resp;
} pri;
#define CMDQ_OP_RESUME 0x44
@@ -561,6 +559,9 @@ struct arm_smmu_evtq {
struct arm_smmu_priq {
struct arm_smmu_queue q;
struct iopf_queue *iopf;
u64 batch;
wait_queue_head_t wq;
};
/* High-level stream table and context descriptor structures */
@@ -694,6 +695,8 @@ struct arm_smmu_master {
unsigned int num_streams;
bool ats_enabled;
bool stall_enabled;
bool pri_supported;
bool prg_resp_needs_ssid;
bool sva_enabled;
bool iopf_enabled;
struct list_head bonds;
@@ -749,6 +752,9 @@ void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size);
int arm_smmu_enable_pri(struct arm_smmu_master *master);
void arm_smmu_disable_pri(struct arm_smmu_master *master);
int arm_smmu_flush_priq(struct arm_smmu_device *smmu);
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
@@ -757,6 +763,8 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
int arm_smmu_master_enable_iopf(struct arm_smmu_master *master);
int arm_smmu_master_disable_iopf(struct arm_smmu_master *master);
struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void arm_smmu_sva_unbind(struct iommu_sva *handle);
@@ -793,6 +801,16 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master
return false;
}
static inline int arm_smmu_master_enable_iopf(struct arm_smmu_master *master)
{
return -ENODEV;
}
static inline int arm_smmu_master_disable_iopf(struct arm_smmu_master *master)
{
return -ENODEV;
}
static inline struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
......