Commit a1fdb30f authored by Robin Murphy; committed by Joseph Qi

iommu/arm-smmu: Support non-strict mode

commit 44f6876a00e83df5fd28681502b19b0f51e4a3c6 upstream

All we need is to wire up .flush_iotlb_all properly and implement the
domain attribute, and iommu-dma and io-pgtable will do the rest for us.
The only real subtlety is documenting the barrier semantics we're
introducing between io-pgtable and the drivers for non-strict flushes.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Zou Cao <zoucao@linux.alibaba.com>
Reviewed-by: Baoyou Xie <xie.baoyou@linux.alibaba.com>
Parent 989011e7
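For context before the diff: the patch exposes non-strict mode through the standard domain-attribute interface rather than a driver-private knob. Below is a minimal sketch of how a caller could opt a DMA domain into non-strict (lazy) invalidation; the helper name is invented for illustration, but iommu_domain_set_attr() and DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE are the real interfaces this patch wires up:

#include <linux/iommu.h>

/* Illustrative helper (name invented): request lazy TLB invalidation
 * on a DMA domain. With this patch, arm-smmu accepts the attribute for
 * IOMMU_DOMAIN_DMA domains and records it in smmu_domain->non_strict.
 */
static int example_enable_non_strict(struct iommu_domain *domain)
{
	int non_strict = 1;

	return iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				     &non_strict);
}

Note that the attribute has to be set before the domain is first attached, since arm_smmu_init_domain_context() consumes it when building the page-table config.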
@@ -256,6 +256,7 @@ struct arm_smmu_domain {
 	const struct iommu_gather_ops	*tlb_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
+	bool				non_strict;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
@@ -457,7 +458,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
-	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	/*
+	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+	 * cleared by the current CPU are visible to the SMMU before the TLBI.
+	 */
+	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
 	arm_smmu_tlb_sync_context(cookie);
 }
@@ -467,7 +472,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_GR0(smmu);
 
-	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+	/* NOTE: see above */
+	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 	arm_smmu_tlb_sync_global(smmu);
 }
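These two hunks are the barrier semantics the commit message calls out. In non-strict mode the context-wide invalidation is what publishes earlier PTE clears to the SMMU, so the TLBI register write itself must order against preceding page-table stores; writel() provides exactly that where writel_relaxed() would not. A rough sketch of the contract, with hypothetical names:

#include <linux/io.h>

/* Sketch of the ordering contract (hypothetical names, not the driver
 * code itself).
 */
static void example_flush_context(u32 asid, void __iomem *tlbi_reg)
{
	/* io-pgtable has already zeroed PTEs with relaxed stores... */

	/*
	 * writel() = barrier + writel_relaxed(): the earlier page-table
	 * stores become visible to the SMMU before the MMIO write that
	 * kicks off the invalidation. Without the barrier, the table
	 * walker could re-fetch a stale PTE after the TLBI completes.
	 */
	writel(asid, tlbi_reg);
}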
@@ -879,6 +885,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
 
+	if (smmu_domain->non_strict)
+		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
 	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
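The quirk consumed here is what changes unmap-time behaviour: with IO_PGTABLE_QUIRK_NON_STRICT set, io-pgtable skips the per-unmap leaf invalidation and leaves stale TLB entries to be swept up by a later flush_iotlb_all. A heavily simplified model of that decision (illustrative only, not the real io-pgtable-arm code):

/* Hypothetical stand-in for a per-IOVA invalidation op. */
static void example_tlb_inv(unsigned long iova) { }

/* Simplified model of io-pgtable's unmap-time choice (illustrative). */
static void example_unmap_pte(u64 *ptep, unsigned long iova, bool non_strict)
{
	*ptep = 0;				/* clear the leaf PTE */

	if (!non_strict)
		example_tlb_inv(iova);		/* strict: invalidate now */
	/*
	 * Non-strict: tolerate the stale TLB entry; it is removed by a
	 * domain-wide flush_iotlb_all before the IOVA can be reused.
	 */
}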
@@ -1268,6 +1277,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
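This new callback is the other half of the scheme: iommu-dma holds freed IOVAs in a flush queue and only recycles them once a domain-wide flush has completed, which is what ends up invoking arm_smmu_flush_iotlb_all(). A toy model of that life cycle (hypothetical and heavily simplified; the real queue is per-CPU and timer-driven):

/* Toy flush-queue model (hypothetical; the real code lives in iommu-dma). */
struct example_flush_queue {
	unsigned long deferred[64];	/* unmapped but not yet invalidated */
	unsigned int count;
};

static void example_queue_unmap(struct example_flush_queue *fq,
				struct iommu_domain *domain,
				unsigned long iova)
{
	fq->deferred[fq->count++] = iova;	/* defer the TLBI */

	if (fq->count == ARRAY_SIZE(fq->deferred)) {
		iommu_flush_tlb_all(domain);	/* -> .flush_iotlb_all */
		fq->count = 0;			/* IOVAs now safe to reuse */
	}
}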
@@ -1486,15 +1503,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-		return 0;
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			*(int *)data = smmu_domain->non_strict;
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
 	default:
-		return -ENODEV;
+		return -EINVAL;
 	}
 }
@@ -1504,28 +1533,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	int ret = 0;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
 	mutex_lock(&smmu_domain->init_mutex);
 
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		if (smmu_domain->smmu) {
-			ret = -EPERM;
-			goto out_unlock;
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			if (smmu_domain->smmu) {
+				ret = -EPERM;
+				goto out_unlock;
+			}
+
+			if (*(int *)data)
+				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+			else
+				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+			break;
+		default:
+			ret = -ENODEV;
 		}
-
-		if (*(int *)data)
-			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-		else
-			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			smmu_domain->non_strict = *(int *)data;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		break;
 	default:
-		ret = -ENODEV;
+		ret = -EINVAL;
 	}
 
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
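On the read side, the same attribute is queried through iommu_domain_get_attr(); this mirrors the kind of check iommu-dma makes before deciding to initialise a flush queue. A hedged sketch (the helper name is invented):

#include <linux/iommu.h>

/* Illustrative helper (name invented): report whether a DMA domain has
 * opted into non-strict invalidation.
 */
static bool example_wants_flush_queue(struct iommu_domain *domain)
{
	int attr;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				  &attr))
		return false;	/* attribute unsupported: stay strict */

	return attr;
}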
@@ -1578,7 +1617,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
 	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,