Unverified commit 0c374437, authored by openeuler-ci-bot, committed by Gitee

!802 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-32233
CVE-2023-2124

iommu bugfix from Zhen Lei 
 
Link: https://gitee.com/openeuler/kernel/pulls/802

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -386,7 +386,7 @@ static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
     if (smmu->ecmdq_enabled) {
         struct arm_smmu_ecmdq *ecmdq;

-        ecmdq = *this_cpu_ptr(smmu->ecmdq);
+        ecmdq = *this_cpu_ptr(smmu->ecmdqs);

         return &ecmdq->cmdq;
     }
@@ -485,7 +485,7 @@ static void arm_smmu_ecmdq_skip_err(struct arm_smmu_device *smmu)
     for (i = 0; i < smmu->nr_ecmdq; i++) {
         unsigned long flags;

-        ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+        ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
         q = &ecmdq->cmdq.q;

         prod = readl_relaxed(q->prod_reg);
@@ -4549,9 +4549,50 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
     return ret;
 }

+static int arm_smmu_ecmdq_reset(struct arm_smmu_device *smmu)
+{
+    int i, cpu, ret = 0;
+    u32 reg;
+
+    if (!smmu->nr_ecmdq)
+        return 0;
+
+    i = 0;
+    for_each_possible_cpu(cpu) {
+        struct arm_smmu_ecmdq *ecmdq;
+        struct arm_smmu_queue *q;
+
+        ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
+        if (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu))
+            continue;
+
+        q = &ecmdq->cmdq.q;
+        i++;
+
+        if (WARN_ON(q->llq.prod != q->llq.cons)) {
+            q->llq.prod = 0;
+            q->llq.cons = 0;
+        }
+        writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
+        writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
+        writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
+
+        /* enable ecmdq */
+        writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
+        ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
+                        1, ARM_SMMU_POLL_TIMEOUT_US);
+        if (ret) {
+            dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
+            smmu->ecmdq_enabled = 0;
+            break;
+        }
+    }
+
+    return ret;
+}
+
 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 {
-    int i;
     int ret;
     u32 reg, enables;
     struct arm_smmu_cmdq_ent cmd;
@@ -4599,31 +4640,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
     writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
     writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

-    for (i = 0; i < smmu->nr_ecmdq; i++) {
-        struct arm_smmu_ecmdq *ecmdq;
-        struct arm_smmu_queue *q;
-
-        ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
-        q = &ecmdq->cmdq.q;
-
-        if (WARN_ON(q->llq.prod != q->llq.cons)) {
-            q->llq.prod = 0;
-            q->llq.cons = 0;
-        }
-        writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
-        writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
-        writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
-
-        /* enable ecmdq */
-        writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
-        ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
-                        1, ARM_SMMU_POLL_TIMEOUT_US);
-        if (ret) {
-            dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
-            smmu->ecmdq_enabled = 0;
-            break;
-        }
-    }
+    arm_smmu_ecmdq_reset(smmu);

     enables = CR0_CMDQEN;
     ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -4723,10 +4740,11 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
     ecmdq = devm_alloc_percpu(smmu->dev, *ecmdq);
     if (!ecmdq)
         return -ENOMEM;
+    smmu->ecmdq = ecmdq;

     if (num_possible_cpus() <= smmu->nr_ecmdq) {
         for_each_possible_cpu(cpu)
-            *per_cpu_ptr(smmu->ecmdq, cpu) = per_cpu_ptr(ecmdq, cpu);
+            *per_cpu_ptr(smmu->ecmdqs, cpu) = per_cpu_ptr(ecmdq, cpu);

         /* A core requires at most one ECMDQ */
         smmu->nr_ecmdq = num_possible_cpus();
@@ -4763,7 +4781,16 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
          * may be left due to truncation rounding.
          */
         nr_ecmdqs[node] = nr_cpus_node(node) * nr_remain / num_possible_cpus();
+    }
+
+    for_each_node(node) {
+        if (!nr_cpus_node(node))
+            continue;
+
         nr_remain -= nr_ecmdqs[node];
+
+        /* An ECMDQ has been reserved for each node at above [1] */
+        nr_ecmdqs[node]++;
     }

     /* Divide the remaining ECMDQs */
@@ -4781,25 +4808,23 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
     }

     for_each_node(node) {
-        int i, round, shared = 0;
+        int i, round, shared;

         if (!nr_cpus_node(node))
             continue;

-        /* An ECMDQ has been reserved for each node at above [1] */
-        nr_ecmdqs[node]++;
-
+        shared = 0;
         if (nr_ecmdqs[node] < nr_cpus_node(node))
             shared = 1;

         i = 0;
         for_each_cpu(cpu, cpumask_of_node(node)) {
             round = i % nr_ecmdqs[node];
-            if (i++ < nr_ecmdqs[node]) {
+            if (i++ < nr_ecmdqs[node])
                 ecmdqs[round] = per_cpu_ptr(ecmdq, cpu);
-
+            else
                 ecmdqs[round]->cmdq.shared = shared;
-            }
-            *per_cpu_ptr(smmu->ecmdq, cpu) = ecmdqs[round];
+
+            *per_cpu_ptr(smmu->ecmdqs, cpu) = ecmdqs[round];
         }
     }
@@ -4823,6 +4848,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
     numq = 1 << FIELD_GET(IDR6_LOG2NUMQ, reg);
     smmu->nr_ecmdq = nump * numq;
     gap = ECMDQ_CP_RRESET_SIZE >> FIELD_GET(IDR6_LOG2NUMQ, reg);
+    if (!smmu->nr_ecmdq)
+        return -EOPNOTSUPP;

     smmu_dma_base = (vmalloc_to_pfn(smmu->base) << PAGE_SHIFT);
     cp_regs = ioremap(smmu_dma_base + ARM_SMMU_ECMDQ_CP_BASE, PAGE_SIZE);
@@ -4855,8 +4882,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
     if (!cp_base)
         return -ENOMEM;

-    smmu->ecmdq = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
-    if (!smmu->ecmdq)
+    smmu->ecmdqs = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
+    if (!smmu->ecmdqs)
         return -ENOMEM;

     ret = arm_smmu_ecmdq_layout(smmu);
@@ -4870,7 +4897,7 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
         struct arm_smmu_ecmdq *ecmdq;
         struct arm_smmu_queue *q;

-        ecmdq = *per_cpu_ptr(smmu->ecmdq, cpu);
+        ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
         q = &ecmdq->cmdq.q;

         /*
@@ -4878,10 +4905,11 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
          * CPUs. The CPUs that are not selected are not showed in
          * cpumask_of_node(node), their 'ecmdq' may be NULL.
          *
-         * (q->ecmdq_prod & ECMDQ_PROD_EN) indicates that the ECMDQ is
-         * shared by multiple cores and has been initialized.
+         * (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)) indicates that the
+         * ECMDQ is shared by multiple cores and should be initialized
+         * only by the first owner.
          */
-        if (!ecmdq || (q->ecmdq_prod & ECMDQ_PROD_EN))
+        if (!ecmdq || (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)))
             continue;

         ecmdq->base = cp_base + addr;
@@ -5324,7 +5352,7 @@ static int arm_smmu_ecmdq_disable(struct device *dev)
     struct arm_smmu_device *smmu = dev_get_drvdata(dev);

     for (i = 0; i < smmu->nr_ecmdq; i++) {
-        ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+        ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
         q = &ecmdq->cmdq.q;

         prod = readl_relaxed(q->prod_reg);
...
@@ -718,7 +718,8 @@ struct arm_smmu_device {
         u32 nr_ecmdq;
         u32 ecmdq_enabled;
     };
-    struct arm_smmu_ecmdq *__percpu *ecmdq;
+    struct arm_smmu_ecmdq *__percpu *ecmdqs;
+    struct arm_smmu_ecmdq __percpu *ecmdq;

     struct arm_smmu_cmdq cmdq;
     struct arm_smmu_evtq evtq;
...
@@ -943,6 +943,16 @@ xlog_recover_buf_commit_pass2(
     if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
         trace_xfs_log_recover_buf_skip(log, buf_f);
         xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+        /*
+         * We're skipping replay of this buffer log item due to the log
+         * item LSN being behind the ondisk buffer. Verify the buffer
+         * contents since we aren't going to run the write verifier.
+         */
+        if (bp->b_ops) {
+            bp->b_ops->verify_read(bp);
+            error = bp->b_error;
+        }
         goto out_release;
     }
...
@@ -507,6 +507,7 @@ struct nft_set_binding {
 };

 enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                   struct nft_set_binding *binding,
                   enum nft_trans_phase phase);
...
@@ -4479,12 +4479,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
     }
 }

+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+    if (nft_set_is_anonymous(set))
+        nft_clear(ctx->net, set);
+
+    set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
                   struct nft_set_binding *binding,
                   enum nft_trans_phase phase)
 {
     switch (phase) {
     case NFT_TRANS_PREPARE:
+        if (nft_set_is_anonymous(set))
+            nft_deactivate_next(ctx->net, set);
+
         set->use--;
         return;
     case NFT_TRANS_ABORT:
...
@@ -233,7 +233,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
 {
     struct nft_dynset *priv = nft_expr_priv(expr);

-    priv->set->use++;
+    nf_tables_activate_set(ctx, priv->set);
 }

 static void nft_dynset_destroy(const struct nft_ctx *ctx,
...
@@ -132,7 +132,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
 {
     struct nft_lookup *priv = nft_expr_priv(expr);

-    priv->set->use++;
+    nf_tables_activate_set(ctx, priv->set);
 }

 static void nft_lookup_destroy(const struct nft_ctx *ctx,
...
@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
 {
     struct nft_objref_map *priv = nft_expr_priv(expr);

-    priv->set->use++;
+    nf_tables_activate_set(ctx, priv->set);
 }

 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
...