Commit 640ae486 authored by Eric Auger, committed by Zheng Zengkai

iommu/smmuv3: Allow stage 1 invalidation with unmanaged ASIDs

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I401IF
CVE: NA

------------------------------

With nested stage support, we will soon need to invalidate S1 contexts and
ranges tagged with an unmanaged ASID, the latter being managed by the guest.
So let's introduce two helpers that allow invalidation with externally
managed ASIDs.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 1a2bb574
@@ -1980,9 +1980,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 }
 
 /* IO_PGTABLE API */
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain,
+				       int ext_asid)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cmdq_ent cmd;
 
@@ -1993,7 +1993,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	 * insertion to guarantee those are observed before the TLBI. Do be
 	 * careful, 007.
 	 */
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+	if (ext_asid >= 0) { /* guest stage 1 invalidation */
+		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
+		cmd.tlbi.asid	= ext_asid;
+		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
+		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+		arm_smmu_cmdq_issue_sync(smmu);
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
 	} else {
 		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
@@ -2009,6 +2015,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 }
 
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	__arm_smmu_tlb_inv_context(smmu_domain, -1);
+}
+
 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 				     unsigned long iova, size_t size,
 				     size_t granule,
@@ -2068,9 +2081,10 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
 
-static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
-					  size_t granule, bool leaf,
-					  struct arm_smmu_domain *smmu_domain)
+static void
+arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+			      size_t granule, bool leaf, int ext_asid,
+			      struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_cmdq_ent cmd = {
 		.tlbi = {
@@ -2078,7 +2092,16 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 		},
 	};
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+	if (ext_asid >= 0) { /* guest stage 1 invalidation */
+		/*
+		 * At the moment the guest only uses NS-EL1, to be
+		 * revisited when nested virt gets supported with E2H
+		 * exposed.
+		 */
+		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
+		cmd.tlbi.asid	= ext_asid;
+		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
+	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		cmd.opcode	= smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
 				  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
 		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
@@ -2086,6 +2109,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
 	}
+
 	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
 
 	/*
@@ -2128,7 +2152,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
-	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
+	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, -1, cookie);
 }
 
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2764,8 +2788,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	arm_smmu_tlb_inv_range_domain(gather->start, gather->end - gather->start + 1,
-				      gather->pgsize, true, smmu_domain);
+	arm_smmu_tlb_inv_range_domain(gather->start,
+				      gather->end - gather->start + 1,
+				      gather->pgsize, true, -1, smmu_domain);
 }
 
 static phys_addr_t
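
For illustration only, a minimal sketch of how a caller elsewhere in the
nested-stage series might use the new ext_asid parameter. The wrapper name,
its arguments and the size == 0 convention below are assumptions made for the
example; only __arm_smmu_tlb_inv_context() and arm_smmu_tlb_inv_range_domain()
come from this patch.

/*
 * Hypothetical caller: invalidate on behalf of a guest that owns the
 * stage 1 ASID. Everything except the two helpers introduced by this
 * patch is illustrative.
 */
static void example_guest_s1_inv(struct arm_smmu_domain *smmu_domain,
				 int guest_asid, unsigned long iova,
				 size_t size, size_t granule, bool leaf)
{
	if (!size)
		/* Whole-context invalidation tagged with the guest ASID */
		__arm_smmu_tlb_inv_context(smmu_domain, guest_asid);
	else
		/* Range invalidation tagged with the guest ASID */
		arm_smmu_tlb_inv_range_domain(iova, size, granule, leaf,
					      guest_asid, smmu_domain);
}

Passing -1 instead of a guest ASID, as the existing callers in the diff do,
keeps the current host-managed ASID behaviour unchanged.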