提交 6788d60e 编写于 作者: W Will Deacon 提交者: Cheng Jian

iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops

mainline inclusion
from mainline-v5.4-rc1
commit 298f7889
category: bugfix
bugzilla: 21306
CVE: NA

-------------------------------------------------------------------------

In preparation for TLB flush gathering in the IOMMU API, rename the
iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which
better describes its purpose and avoids the potential for confusion
between different levels of the API.

$ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g'
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
	drivers/gpu/drm/panfrost/panfrost_mmu.c		not exist
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
上级 9349d3a6
...@@ -1769,7 +1769,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, ...@@ -1769,7 +1769,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
} while (size -= granule); } while (size -= granule);
} }
static const struct iommu_gather_ops arm_smmu_gather_ops = { static const struct iommu_flush_ops arm_smmu_flush_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync, .tlb_sync = arm_smmu_tlb_sync,
...@@ -2044,7 +2044,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain, ...@@ -2044,7 +2044,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
.ias = ias, .ias = ias,
.oas = oas, .oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &arm_smmu_gather_ops, .tlb = &arm_smmu_flush_ops,
.iommu_dev = smmu->dev, .iommu_dev = smmu->dev,
}; };
......
...@@ -253,7 +253,7 @@ enum arm_smmu_domain_stage { ...@@ -253,7 +253,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain { struct arm_smmu_domain {
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops; struct io_pgtable_ops *pgtbl_ops;
const struct iommu_gather_ops *tlb_ops; const struct iommu_flush_ops *tlb_ops;
struct arm_smmu_cfg cfg; struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage; enum arm_smmu_domain_stage stage;
bool non_strict; bool non_strict;
...@@ -535,19 +535,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, ...@@ -535,19 +535,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
} }
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync_context, .tlb_sync = arm_smmu_tlb_sync_context,
}; };
static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync_context, .tlb_sync = arm_smmu_tlb_sync_context,
}; };
static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
.tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
.tlb_sync = arm_smmu_tlb_sync_vmid, .tlb_sync = arm_smmu_tlb_sync_vmid,
......
...@@ -825,7 +825,7 @@ static void dummy_tlb_sync(void *cookie) ...@@ -825,7 +825,7 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie); WARN_ON(cookie != cfg_cookie);
} }
static const struct iommu_gather_ops dummy_tlb_ops = { static const struct iommu_flush_ops dummy_tlb_ops = {
.tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush, .tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync, .tlb_sync = dummy_tlb_sync,
......
...@@ -986,7 +986,7 @@ static void dummy_tlb_sync(void *cookie) ...@@ -986,7 +986,7 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie); WARN_ON(cookie != cfg_cookie);
} }
static const struct iommu_gather_ops dummy_tlb_ops __initconst = { static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush, .tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync, .tlb_sync = dummy_tlb_sync,
......
...@@ -360,7 +360,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, ...@@ -360,7 +360,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
/* The hardware doesn't support selective TLB flush. */ /* The hardware doesn't support selective TLB flush. */
} }
static const struct iommu_gather_ops ipmmu_gather_ops = { static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_add_flush = ipmmu_tlb_add_flush, .tlb_add_flush = ipmmu_tlb_add_flush,
.tlb_sync = ipmmu_tlb_flush_all, .tlb_sync = ipmmu_tlb_flush_all,
...@@ -424,7 +424,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) ...@@ -424,7 +424,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
domain->cfg.ias = 32; domain->cfg.ias = 32;
domain->cfg.oas = 40; domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops; domain->cfg.tlb = &ipmmu_flush_ops;
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
domain->io_domain.geometry.force_aperture = true; domain->io_domain.geometry.force_aperture = true;
/* /*
......
...@@ -189,7 +189,7 @@ static void __flush_iotlb_sync(void *cookie) ...@@ -189,7 +189,7 @@ static void __flush_iotlb_sync(void *cookie)
*/ */
} }
static const struct iommu_gather_ops msm_iommu_gather_ops = { static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_all = __flush_iotlb, .tlb_flush_all = __flush_iotlb,
.tlb_add_flush = __flush_iotlb_range, .tlb_add_flush = __flush_iotlb_range,
.tlb_sync = __flush_iotlb_sync, .tlb_sync = __flush_iotlb_sync,
...@@ -356,7 +356,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv) ...@@ -356,7 +356,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
.ias = 32, .ias = 32,
.oas = 32, .oas = 32,
.tlb = &msm_iommu_gather_ops, .tlb = &msm_iommu_flush_ops,
.iommu_dev = priv->dev, .iommu_dev = priv->dev,
}; };
......
...@@ -196,7 +196,7 @@ static void mtk_iommu_tlb_sync(void *cookie) ...@@ -196,7 +196,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
} }
} }
static const struct iommu_gather_ops mtk_iommu_gather_ops = { static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_all = mtk_iommu_tlb_flush_all,
.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
.tlb_sync = mtk_iommu_tlb_sync, .tlb_sync = mtk_iommu_tlb_sync,
...@@ -275,7 +275,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) ...@@ -275,7 +275,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
.ias = 32, .ias = 32,
.oas = 32, .oas = 32,
.tlb = &mtk_iommu_gather_ops, .tlb = &mtk_iommu_flush_ops,
.iommu_dev = data->dev, .iommu_dev = data->dev,
}; };
......
...@@ -175,7 +175,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, ...@@ -175,7 +175,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
} }
} }
static const struct iommu_gather_ops qcom_gather_ops = { static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_flush_all = qcom_iommu_tlb_inv_context,
.tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
.tlb_sync = qcom_iommu_tlb_sync, .tlb_sync = qcom_iommu_tlb_sync,
...@@ -226,7 +226,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, ...@@ -226,7 +226,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
.ias = 32, .ias = 32,
.oas = 40, .oas = 40,
.tlb = &qcom_gather_ops, .tlb = &qcom_flush_ops,
.iommu_dev = qcom_iommu->dev, .iommu_dev = qcom_iommu->dev,
}; };
......
...@@ -16,7 +16,7 @@ enum io_pgtable_fmt { ...@@ -16,7 +16,7 @@ enum io_pgtable_fmt {
}; };
/** /**
* struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
* *
* @tlb_flush_all: Synchronously invalidate the entire TLB context. * @tlb_flush_all: Synchronously invalidate the entire TLB context.
* @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
...@@ -27,7 +27,7 @@ enum io_pgtable_fmt { ...@@ -27,7 +27,7 @@ enum io_pgtable_fmt {
* Note that these can all be called in atomic context and must therefore * Note that these can all be called in atomic context and must therefore
* not block. * not block.
*/ */
struct iommu_gather_ops { struct iommu_flush_ops {
void (*tlb_flush_all)(void *cookie); void (*tlb_flush_all)(void *cookie);
void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
bool leaf, void *cookie); bool leaf, void *cookie);
...@@ -83,7 +83,7 @@ struct io_pgtable_cfg { ...@@ -83,7 +83,7 @@ struct io_pgtable_cfg {
unsigned int ias; unsigned int ias;
unsigned int oas; unsigned int oas;
bool coherent_walk; bool coherent_walk;
const struct iommu_gather_ops *tlb; const struct iommu_flush_ops *tlb;
struct device *iommu_dev; struct device *iommu_dev;
/* Low-level data specific to the table format */ /* Low-level data specific to the table format */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册