提交 ceaf8cb8 编写于 作者: J Jean-Philippe Brucker 提交者: Xie XiuQi

iommu/arm-smmu-v3: Add SVA feature checking

hulk inclusion
category: feature
bugzilla: 14369
CVE: NA
-------------------

Aggregate all sanity-checks for sharing CPU page tables with the SMMU
under a single ARM_SMMU_FEAT_SVA bit. For PCIe SVA, users also need to
check FEAT_ATS and FEAT_PRI. For platform SVM, they will most likely have
to check FEAT_STALLS.
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Fang Lijun <fanglijun3@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 6293a138
...@@ -532,6 +532,7 @@ struct arm_smmu_device { ...@@ -532,6 +532,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_VAX (1 << 14) #define ARM_SMMU_FEAT_VAX (1 << 14)
#define ARM_SMMU_FEAT_E2H (1 << 15) #define ARM_SMMU_FEAT_E2H (1 << 15)
#define ARM_SMMU_FEAT_BTM (1 << 16) #define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17)
u32 features; u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
...@@ -2757,6 +2758,76 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) ...@@ -2757,6 +2758,76 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0; return 0;
} }
/*
 * arm_smmu_supports_sva() - can this SMMU share CPU page tables?
 *
 * Returns true only when the SMMU is cache-coherent, supports broadcast TLB
 * maintenance (BTM), handles the kernel's page size, and has output-address
 * and ASID widths at least as large as those of the CPUs (sanitized values
 * from cpufeature). AArch32 page-table formats are deliberately ignored.
 */
static bool arm_smmu_supports_sva(struct arm_smmu_device *smmu)
{
	/* CPU PA sizes, indexed by ID_AA64MMFR0_EL1.PARange */
	static const unsigned long pa_bits[] = { 32, 36, 40, 42, 44, 48, 52 };
	unsigned long mmfr0, parange, asid_fld, cpu_asid_bits;
	u32 required = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;

	if ((smmu->features & required) != required)
		return false;

	/* The SMMU must at least handle the CPU page size */
	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature).
	 * We're not even pretending to support AArch32 here.
	 */
	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_PARANGE_SHIFT);
	/* Reject reserved/unknown PARange encodings */
	if (parange >= sizeof(pa_bits) / sizeof(pa_bits[0]))
		return false;

	/* Abort if the MMU outputs addresses greater than what we support */
	if (smmu->oas < pa_bits[parange])
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	asid_fld = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_ASID_SHIFT);
	cpu_asid_bits = asid_fld ? 16 : 8;
	if (smmu->asid_bits < cpu_asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		cpu_asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << cpu_asid_bits) -
		num_possible_cpus() - 2);

	return true;
}
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{ {
u32 reg; u32 reg;
...@@ -2958,6 +3029,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) ...@@ -2958,6 +3029,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ias = max(smmu->ias, smmu->oas); smmu->ias = max(smmu->ias, smmu->oas);
if (arm_smmu_supports_sva(smmu))
smmu->features |= ARM_SMMU_FEAT_SVA;
dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
smmu->ias, smmu->oas, smmu->features); smmu->ias, smmu->oas, smmu->features);
return 0; return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册