提交 55d7a2b1 编写于 作者: Z Zhen Lei 提交者: Zheng Zengkai

iommu/arm-smmu-v3: Ensure that a set of associated commands are inserted in the same ECMDQ

hulk inclusion
category: feature
bugzilla: 174251
CVE: NA

-------------------------------------------------------------------------

The SYNC command only ensures that the commands preceding it in the
same ECMDQ have been executed; it cannot synchronize commands in other
ECMDQs. If an unmap involves multiple commands and some of them are
inserted on one core while the rest are inserted on another, then the
completion of the SYNC does not guarantee that all of the preceding
commands have been executed.

Preventing the task that inserts a set of associated commands from being
migrated to another core ensures that all of those commands are inserted
into the same ECMDQ.
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 ba3b3b12
...@@ -236,6 +236,18 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) ...@@ -236,6 +236,18 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
return 0; return 0;
} }
/*
 * Pin the current task to its CPU while a batch of related commands is
 * built and submitted, so that every command in the batch lands in the
 * same per-CPU ECMDQ. A no-op when ECMDQs are not in use, since the
 * single shared command queue needs no such pinning.
 */
static void arm_smmu_preempt_disable(struct arm_smmu_device *smmu)
{
	if (!smmu->ecmdq_enabled)
		return;

	preempt_disable();
}
/*
 * Counterpart of arm_smmu_preempt_disable(): re-allow migration once the
 * whole command batch has been inserted into the ECMDQ. Must mirror the
 * disable path exactly — only drop the preempt count if ECMDQs are
 * enabled, i.e. if the matching disable actually took it.
 */
static void arm_smmu_preempt_enable(struct arm_smmu_device *smmu)
{
	if (!smmu->ecmdq_enabled)
		return;

	preempt_enable();
}
/* High-level queue accessors */ /* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{ {
...@@ -1037,6 +1049,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, ...@@ -1037,6 +1049,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
}, },
}; };
arm_smmu_preempt_disable(smmu);
spin_lock_irqsave(&smmu_domain->devices_lock, flags); spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) { list_for_each_entry(master, &smmu_domain->devices, domain_head) {
for (i = 0; i < master->num_streams; i++) { for (i = 0; i < master->num_streams; i++) {
...@@ -1047,6 +1060,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, ...@@ -1047,6 +1060,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
arm_smmu_cmdq_batch_submit(smmu, &cmds); arm_smmu_cmdq_batch_submit(smmu, &cmds);
arm_smmu_preempt_enable(smmu);
} }
static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
...@@ -2032,30 +2046,36 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, ...@@ -2032,30 +2046,36 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, unsigned int ssid) static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, unsigned int ssid)
{ {
int i; int i, ret;
struct arm_smmu_cmdq_ent cmd; struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_cmdq_batch cmds = {}; struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_device *smmu = master->smmu;
arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd); arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
arm_smmu_preempt_disable(smmu);
for (i = 0; i < master->num_streams; i++) { for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id; cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd); arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
} }
return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); ret = arm_smmu_cmdq_batch_submit(smmu, &cmds);
arm_smmu_preempt_enable(smmu);
return ret;
} }
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size) unsigned long iova, size_t size)
{ {
int i; int i, ret;
unsigned long flags; unsigned long flags;
struct arm_smmu_cmdq_ent cmd; struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_master *master; struct arm_smmu_master *master;
struct arm_smmu_cmdq_batch cmds = {}; struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_device *smmu = smmu_domain->smmu;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) if (!(smmu->features & ARM_SMMU_FEAT_ATS))
return 0; return 0;
/* /*
...@@ -2077,6 +2097,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, ...@@ -2077,6 +2097,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
arm_smmu_preempt_disable(smmu);
spin_lock_irqsave(&smmu_domain->devices_lock, flags); spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) { list_for_each_entry(master, &smmu_domain->devices, domain_head) {
if (!master->ats_enabled) if (!master->ats_enabled)
...@@ -2084,12 +2105,15 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, ...@@ -2084,12 +2105,15 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
for (i = 0; i < master->num_streams; i++) { for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id; cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
} }
} }
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); ret = arm_smmu_cmdq_batch_submit(smmu, &cmds);
arm_smmu_preempt_enable(smmu);
return ret;
} }
/* IO_PGTABLE API */ /* IO_PGTABLE API */
...@@ -2170,6 +2194,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, ...@@ -2170,6 +2194,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
num_pages = size >> tg; num_pages = size >> tg;
} }
arm_smmu_preempt_disable(smmu);
while (iova < end) { while (iova < end) {
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
/* /*
...@@ -2201,6 +2226,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, ...@@ -2201,6 +2226,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
iova += inv_range; iova += inv_range;
} }
arm_smmu_cmdq_batch_submit(smmu, &cmds); arm_smmu_cmdq_batch_submit(smmu, &cmds);
arm_smmu_preempt_enable(smmu);
} }
static void static void
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册