Commit 947019a9 authored by Kunkun Jiang, committed by Zheng Zengkai

iommu/arm-smmu-v3: Realize switch_dirty_log iommu ops

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZUKK
CVE: NA

------------------------------

This realizes switch_dirty_log. In order to obtain a finer dirty
granule, it invokes arm_smmu_split_block() when dirty logging starts,
and invokes arm_smmu_merge_page() to recover block mappings when
dirty logging stops.
Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 829e1611
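
As context for the diff below, here is a hypothetical caller-side sketch of the new op. It is not part of this patch: the wrapper function names are invented for illustration, and it assumes the switch_dirty_log iommu_ops member added by this series, with the signature used in the change itself.

/*
 * Hypothetical caller sketch (not part of this patch). Only the
 * switch_dirty_log signature is taken from the change below; the
 * wrapper names are illustrative.
 */
static int example_start_dirty_log(struct iommu_domain *domain,
				   unsigned long iova, size_t size, int prot)
{
	if (!domain->ops->switch_dirty_log)
		return -ENODEV;

	/* enable = true: split block mappings for page-granule dirty tracking */
	return domain->ops->switch_dirty_log(domain, true, iova, size, prot);
}

static int example_stop_dirty_log(struct iommu_domain *domain,
				  unsigned long iova, size_t size, int prot)
{
	if (!domain->ops->switch_dirty_log)
		return -ENODEV;

	/* enable = false: merge pages back into block mappings */
	return domain->ops->switch_dirty_log(domain, false, iova, size, prot);
}
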
......@@ -3013,6 +3013,147 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
	return ret;
}

static int arm_smmu_split_block(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	size_t handled_size;

	if (!(smmu->features & (ARM_SMMU_FEAT_BBML1 | ARM_SMMU_FEAT_BBML2))) {
		dev_err(smmu->dev, "doesn't support BBML1/2, can't split block\n");
		return -ENODEV;
	}
	if (!ops || !ops->split_block) {
		pr_err("io-pgtable doesn't support split block\n");
		return -ENODEV;
	}

	handled_size = ops->split_block(ops, iova, size);
	if (handled_size != size) {
		pr_err("split block failed\n");
		return -EFAULT;
	}

	return 0;
}

static int __arm_smmu_merge_page(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t size, int prot)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	size_t handled_size;

	if (!ops || !ops->merge_page) {
		pr_err("io-pgtable doesn't support merge page\n");
		return -ENODEV;
	}

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		handled_size = ops->merge_page(ops, iova, paddr, pgsize, prot);
		if (handled_size != pgsize) {
			pr_err("merge page failed\n");
			return -EFAULT;
		}
		pr_debug("merge handled: iova 0x%lx pa %pa size 0x%zx\n",
			 iova, &paddr, pgsize);

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	return 0;
}

static int arm_smmu_merge_page(struct iommu_domain *domain, unsigned long iova,
			       size_t size, int prot)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	phys_addr_t phys;
	dma_addr_t p, i;
	size_t cont_size;
	int ret = 0;

	if (!(smmu->features & (ARM_SMMU_FEAT_BBML1 | ARM_SMMU_FEAT_BBML2))) {
		dev_err(smmu->dev, "doesn't support BBML1/2, can't merge page\n");
		return -ENODEV;
	}

	if (!ops || !ops->iova_to_phys)
		return -ENODEV;

	while (size) {
		phys = ops->iova_to_phys(ops, iova);
		cont_size = PAGE_SIZE;
		p = phys + cont_size;
		i = iova + cont_size;

		while (cont_size < size && p == ops->iova_to_phys(ops, i)) {
			p += PAGE_SIZE;
			i += PAGE_SIZE;
			cont_size += PAGE_SIZE;
		}

		if (cont_size != PAGE_SIZE) {
			ret = __arm_smmu_merge_page(domain, iova, phys,
						    cont_size, prot);
			if (ret)
				break;
		}

		iova += cont_size;
		size -= cont_size;
	}

	return ret;
}

static int arm_smmu_switch_dirty_log(struct iommu_domain *domain, bool enable,
				     unsigned long iova, size_t size, int prot)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!(smmu->features & ARM_SMMU_FEAT_HD))
		return -ENODEV;
	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return -EINVAL;

	if (enable) {
		/*
		 * For SMMU, the hardware dirty management is always enabled if
		 * hardware supports HTTU HD. The action to start dirty log is
		 * splitting block mappings.
		 *
		 * We don't return an error even if the split operation fails,
		 * as we can still track dirty at block granule, which is still
		 * a much better choice compared to a full-dirty policy.
		 */
		arm_smmu_split_block(domain, iova, size);
	} else {
		/*
		 * For SMMU, the hardware dirty management is always enabled if
		 * hardware supports HTTU HD. The action to stop dirty log is
		 * merging page mappings.
		 *
		 * We don't return an error even if the merge operation fails,
		 * as it only affects the performance of DMA transactions.
		 */
		arm_smmu_merge_page(domain, iova, size, prot);
	}

	return 0;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
......@@ -3250,6 +3391,7 @@ static struct iommu_ops arm_smmu_ops = {
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
	.switch_dirty_log = arm_smmu_switch_dirty_log,
	.of_xlate = arm_smmu_of_xlate,
	.get_resv_regions = arm_smmu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
......
......@@ -2517,8 +2517,8 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
size_t iommu_pgsize(struct iommu_domain *domain,
		    unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;
......@@ -2548,6 +2548,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
	return pgsize;
}
EXPORT_SYMBOL_GPL(iommu_pgsize);

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
......
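
The merge path above picks its step size with iommu_pgsize(), which this hunk makes non-static and exports. As a rough, standalone illustration of that selection (the largest hardware-supported page size that fits both the remaining length and the combined iova | paddr alignment), here is a minimal sketch; the function name is hypothetical and the logic only approximates the generic helper.

/*
 * Illustrative sketch only: roughly the page-size selection that
 * iommu_pgsize() performs for the merge loop. Assumes <linux/bitops.h>
 * and <linux/minmax.h>; the function name is hypothetical.
 */
static size_t example_pick_pgsize(unsigned long pgsize_bitmap,
				  unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Largest page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* Honour the combined iova | paddr alignment, if any */
	if (addr_merge)
		pgsize_idx = min_t(unsigned int, pgsize_idx, __ffs(addr_merge));

	/* Mask of candidate sizes, restricted to what the hardware supports */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
	if (!pgsize)
		return 0;

	/* Pick the biggest remaining candidate */
	return 1UL << __fls(pgsize);
}
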
......@@ -487,6 +487,8 @@ extern int iommu_attach_pasid_table(struct iommu_domain *domain,
extern void iommu_detach_pasid_table(struct iommu_domain *domain);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
......