diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index be8cc53659f8b0c505b2f42613cc6390a954d6eb..1ee14a59a3d66c7fe795f09b690f2b25240f2174 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -386,7 +386,7 @@ static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
 	if (smmu->ecmdq_enabled) {
 		struct arm_smmu_ecmdq *ecmdq;
 
-		ecmdq = *this_cpu_ptr(smmu->ecmdq);
+		ecmdq = *this_cpu_ptr(smmu->ecmdqs);
 
 		return &ecmdq->cmdq;
 	}
@@ -485,7 +485,7 @@ static void arm_smmu_ecmdq_skip_err(struct arm_smmu_device *smmu)
 	for (i = 0; i < smmu->nr_ecmdq; i++) {
 		unsigned long flags;
 
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
 		q = &ecmdq->cmdq.q;
 
 		prod = readl_relaxed(q->prod_reg);
@@ -4549,9 +4549,50 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
 	return ret;
 }
 
+static int arm_smmu_ecmdq_reset(struct arm_smmu_device *smmu)
+{
+	int i, cpu, ret = 0;
+	u32 reg;
+
+	if (!smmu->nr_ecmdq)
+		return 0;
+
+	i = 0;
+	for_each_possible_cpu(cpu) {
+		struct arm_smmu_ecmdq *ecmdq;
+		struct arm_smmu_queue *q;
+
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
+		if (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu))
+			continue;
+
+		q = &ecmdq->cmdq.q;
+		i++;
+
+		if (WARN_ON(q->llq.prod != q->llq.cons)) {
+			q->llq.prod = 0;
+			q->llq.cons = 0;
+		}
+		writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
+		writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
+		writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
+
+		/* enable ecmdq */
+		writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
+		ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
+						 1, ARM_SMMU_POLL_TIMEOUT_US);
+		if (ret) {
+			dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
+			smmu->ecmdq_enabled = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 {
-	int i;
 	int ret;
 	u32 reg, enables;
 	struct arm_smmu_cmdq_ent cmd;
@@ -4599,31 +4640,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
 
-	for (i = 0; i < smmu->nr_ecmdq; i++) {
-		struct arm_smmu_ecmdq *ecmdq;
-		struct arm_smmu_queue *q;
-
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
-		q = &ecmdq->cmdq.q;
-
-		if (WARN_ON(q->llq.prod != q->llq.cons)) {
-			q->llq.prod = 0;
-			q->llq.cons = 0;
-		}
-		writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
-		writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
-		writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
-
-		/* enable ecmdq */
-		writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
-		ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
-						 1, ARM_SMMU_POLL_TIMEOUT_US);
-		if (ret) {
-			dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
-			smmu->ecmdq_enabled = 0;
-			break;
-		}
-	}
+	arm_smmu_ecmdq_reset(smmu);
 
 	enables = CR0_CMDQEN;
 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -4723,10 +4740,11 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 	ecmdq = devm_alloc_percpu(smmu->dev, *ecmdq);
 	if (!ecmdq)
 		return -ENOMEM;
+	smmu->ecmdq = ecmdq;
 
 	if (num_possible_cpus() <= smmu->nr_ecmdq) {
 		for_each_possible_cpu(cpu)
-			*per_cpu_ptr(smmu->ecmdq, cpu) = per_cpu_ptr(ecmdq, cpu);
+			*per_cpu_ptr(smmu->ecmdqs, cpu) = per_cpu_ptr(ecmdq, cpu);
 
 		/* A core requires at most one ECMDQ */
 		smmu->nr_ecmdq = num_possible_cpus();
@@ -4763,7 +4781,16 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 		 * may be left due to truncation rounding.
 		 */
 		nr_ecmdqs[node] = nr_cpus_node(node) * nr_remain / num_possible_cpus();
+	}
+
+	for_each_node(node) {
+		if (!nr_cpus_node(node))
+			continue;
+
 		nr_remain -= nr_ecmdqs[node];
+
+		/* An ECMDQ has been reserved for each node at above [1] */
+		nr_ecmdqs[node]++;
 	}
 
 	/* Divide the remaining ECMDQs */
@@ -4781,25 +4808,23 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 	}
 
 	for_each_node(node) {
-		int i, round, shared = 0;
+		int i, round, shared;
 
 		if (!nr_cpus_node(node))
 			continue;
 
-		/* An ECMDQ has been reserved for each node at above [1] */
-		nr_ecmdqs[node]++;
-
+		shared = 0;
 		if (nr_ecmdqs[node] < nr_cpus_node(node))
 			shared = 1;
 
 		i = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
 			round = i % nr_ecmdqs[node];
-			if (i++ < nr_ecmdqs[node]) {
+			if (i++ < nr_ecmdqs[node])
 				ecmdqs[round] = per_cpu_ptr(ecmdq, cpu);
+			else
 				ecmdqs[round]->cmdq.shared = shared;
-			}
-			*per_cpu_ptr(smmu->ecmdq, cpu) = ecmdqs[round];
+			*per_cpu_ptr(smmu->ecmdqs, cpu) = ecmdqs[round];
 		}
 	}
 
@@ -4823,6 +4848,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 	numq = 1 << FIELD_GET(IDR6_LOG2NUMQ, reg);
 	smmu->nr_ecmdq = nump * numq;
 	gap = ECMDQ_CP_RRESET_SIZE >> FIELD_GET(IDR6_LOG2NUMQ, reg);
+	if (!smmu->nr_ecmdq)
+		return -EOPNOTSUPP;
 
 	smmu_dma_base = (vmalloc_to_pfn(smmu->base) << PAGE_SHIFT);
 	cp_regs = ioremap(smmu_dma_base + ARM_SMMU_ECMDQ_CP_BASE, PAGE_SIZE);
@@ -4855,8 +4882,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 	if (!cp_base)
 		return -ENOMEM;
 
-	smmu->ecmdq = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
-	if (!smmu->ecmdq)
+	smmu->ecmdqs = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
+	if (!smmu->ecmdqs)
 		return -ENOMEM;
 
 	ret = arm_smmu_ecmdq_layout(smmu);
@@ -4870,7 +4897,7 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 		struct arm_smmu_ecmdq *ecmdq;
 		struct arm_smmu_queue *q;
 
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, cpu);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
 		q = &ecmdq->cmdq.q;
 
 		/*
@@ -4878,10 +4905,11 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 		 * CPUs. The CPUs that are not selected are not showed in
 		 * cpumask_of_node(node), their 'ecmdq' may be NULL.
 		 *
-		 * (q->ecmdq_prod & ECMDQ_PROD_EN) indicates that the ECMDQ is
-		 * shared by multiple cores and has been initialized.
+		 * (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)) indicates that the
+		 * ECMDQ is shared by multiple cores and should be initialized
+		 * only by the first owner.
 		 */
-		if (!ecmdq || (q->ecmdq_prod & ECMDQ_PROD_EN))
+		if (!ecmdq || (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)))
 			continue;
 
 		ecmdq->base = cp_base + addr;
@@ -5324,7 +5352,7 @@ static int arm_smmu_ecmdq_disable(struct device *dev)
 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
 
 	for (i = 0; i < smmu->nr_ecmdq; i++) {
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
 		q = &ecmdq->cmdq.q;
 
 		prod = readl_relaxed(q->prod_reg);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index f680cd6dd3bdb291d2e97c9357eae757ccb860f9..919473d2217b161fad53729ee8d9094b218e400f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -718,7 +718,8 @@ struct arm_smmu_device {
 		u32 nr_ecmdq;
 		u32 ecmdq_enabled;
 	};
-	struct arm_smmu_ecmdq *__percpu *ecmdq;
+	struct arm_smmu_ecmdq *__percpu *ecmdqs;
+	struct arm_smmu_ecmdq __percpu *ecmdq;
 
 	struct arm_smmu_cmdq cmdq;
 	struct arm_smmu_evtq evtq;
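
Note on the SMMU change: 'smmu->ecmdqs' is now the per-CPU table of assigned ECMDQ pointers, while the new 'smmu->ecmdq' member keeps the per-CPU backing storage allocated in arm_smmu_ecmdq_layout(). A CPU is the first owner of a queue exactly when its 'ecmdqs' slot points at its own 'ecmdq' slot, and that is the test arm_smmu_ecmdq_reset() and arm_smmu_ecmdq_probe() use to initialize each shared queue only once, instead of probing ECMDQ_PROD_EN in a hardware register. Splitting the quota loop in arm_smmu_ecmdq_layout() also means every node's proportional share is computed from the same 'nr_remain' before any subtraction. Below is a minimal userspace sketch of the ownership test, with plain arrays standing in for the two percpu areas; all names in it are illustrative, not kernel API:

#include <stdio.h>

#define NR_CPUS 4

struct ecmdq { int initialized; };

static struct ecmdq storage[NR_CPUS];   /* models smmu->ecmdq: percpu queue storage */
static struct ecmdq *assigned[NR_CPUS]; /* models smmu->ecmdqs: percpu pointer table */

int main(void)
{
	int cpu;

	/* two queues for four CPUs: CPUs 0/1 share queue 0, CPUs 2/3 share queue 2 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		assigned[cpu] = &storage[(cpu / 2) * 2];

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* mirrors "ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)": not the first owner */
		if (assigned[cpu] != &storage[cpu]) {
			printf("cpu%d: shared queue, skip init\n", cpu);
			continue;
		}
		assigned[cpu]->initialized = 1;
		printf("cpu%d: first owner, initialize queue\n", cpu);
	}
	return 0;
}
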
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index 4b2ad8dce690a5ee632968092982340900ff5998..d892125eb022a8ef1bf796b5aa5b4a7bcedee60c 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -943,6 +943,16 @@ xlog_recover_buf_commit_pass2(
 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
 		trace_xfs_log_recover_buf_skip(log, buf_f);
 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+		/*
+		 * We're skipping replay of this buffer log item due to the log
+		 * item LSN being behind the ondisk buffer. Verify the buffer
+		 * contents since we aren't going to run the write verifier.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_read(bp);
+			error = bp->b_error;
+		}
 		goto out_release;
 	}
 
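Note on the XFS change: when replay of a buffer log item is skipped because the on-disk buffer already carries an LSN at or beyond the item's, that buffer will never pass through a write verifier, so the read verifier is invoked by hand and any error it records is propagated. A toy standalone model of that decision follows; the ops structure, LSN comparison, and verifier are simplified stand-ins, not the XFS API:

#include <stdio.h>

/* models xfs_buf_ops: an optional per-type content verifier */
struct buf_ops {
	int (*verify_read)(const char *data);
};

static int toy_verify(const char *data)
{
	/* stand-in for a real structure check */
	return data[0] == 'X' ? 0 : -1;
}

static const struct buf_ops toy_ops = { .verify_read = toy_verify };

/*
 * If the on-disk copy is already newer than the log item, skip the
 * replay but still verify the contents, because the write verifier
 * that would normally catch corruption is never going to run.
 */
static int replay_or_skip(long disk_lsn, long item_lsn,
			  const struct buf_ops *ops, const char *data)
{
	if (disk_lsn >= item_lsn) {
		if (ops && ops->verify_read)
			return ops->verify_read(data);
		return 0;
	}
	/* ... replay would happen here; the write verifier runs at I/O time ... */
	return 0;
}

int main(void)
{
	printf("skip, good buffer: %d\n", replay_or_skip(10, 5, &toy_ops, "X..."));
	printf("skip, bad buffer:  %d\n", replay_or_skip(10, 5, &toy_ops, "?..."));
	return 0;
}
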
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5b6803cd3299fda05f8641ec743b263e8a81d2f1..f0870b38267813f57c6c8727f97596c616ddd182 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -507,6 +507,7 @@ struct nft_set_binding {
 };
 
 enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
 			      struct nft_set_binding *binding,
 			      enum nft_trans_phase phase);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4806169c23490b4f39f13a1b0d1827eb171979cc..936dc1dad2c3ce84656fdae24ee35ef9c47f7952 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4479,12 +4479,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	}
 }
 
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	if (nft_set_is_anonymous(set))
+		nft_clear(ctx->net, set);
+
+	set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
 			      struct nft_set_binding *binding,
 			      enum nft_trans_phase phase)
 {
 	switch (phase) {
 	case NFT_TRANS_PREPARE:
+		if (nft_set_is_anonymous(set))
+			nft_deactivate_next(ctx->net, set);
+
 		set->use--;
 		return;
 	case NFT_TRANS_ABORT:
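Note on the nf_tables change: nf_tables_activate_set() is the exact mirror of the NFT_TRANS_PREPARE branch of nf_tables_deactivate_set(). Preparing a rule deletion now marks an anonymous set inactive in the next generation, so an aborted transaction must both restore the use count and clear that mark; otherwise the set stays deactivated while rules still reference it. A compact two-generation toy model of this pairing follows; the struct, genmask encoding, and helpers are simplifications of the nft originals:

#include <stdio.h>
#include <stdbool.h>

/* toy model of nft's two-generation activity bits: bit 1 = "inactive in next gen" */
struct toy_set {
	unsigned int genmask;
	unsigned int use;
	bool anonymous;
};

static void deactivate_next(struct toy_set *s) { s->genmask |= 2; }
static void clear_gen(struct toy_set *s)       { s->genmask = 0; }

/* mirrors nf_tables_deactivate_set(), NFT_TRANS_PREPARE case */
static void deactivate_set(struct toy_set *s)
{
	if (s->anonymous)
		deactivate_next(s);
	s->use--;
}

/* mirrors the new nf_tables_activate_set(), called when the transaction aborts */
static void activate_set(struct toy_set *s)
{
	if (s->anonymous)
		clear_gen(s);
	s->use++;
}

int main(void)
{
	struct toy_set s = { .genmask = 0, .use = 1, .anonymous = true };

	deactivate_set(&s); /* rule deletion prepared */
	activate_set(&s);   /* abort: must undo both effects, not just use++ */

	printf("use=%u genmask=%u (expect use=1 genmask=0)\n", s.use, s.genmask);
	return 0;
}
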
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 8c45e01fecdd82e7929beaef01e626b887f15deb..038588d4d80e112f01df1513c38b91f4a852c582 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -233,7 +233,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index b0f558b4fea5410184d639501c8cea9578bcc862..8bc008ff00cb742ee026b7179f90e1068be96510 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -132,7 +132,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index bc104d36d3bb28588f401d2f3a74f8c324abc296..25157d8cc2504885ec6aaccfbfbc2a496fd6ba15 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
 {
 	struct nft_objref_map *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
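
With the three expressions above routed through nf_tables_activate_set() instead of a bare priv->set->use++, an aborted transaction undoes everything the prepare phase did: the use count is restored and an anonymous set is made active again, rather than being left deactivated while rules still reference it.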