提交 bdc8c00c 编写于 作者: K Kunkun Jiang 提交者: Zheng Zengkai

iommu/io-pgtable-arm: Add and realize clear_dirty_log ops

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZUKK
CVE: NA

------------------------------

After the dirty log is retrieved, the user should clear it to re-enable
dirty-log tracking for the dirtied pages. This clears the dirty state of
the leaf TTDs specified by the user-provided bitmap (since we only set
the DBM bit for stage 1 mappings, clearing means setting the AP[2] bit).
Co-developed-by: NKeqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: NKunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: NKeqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: NZheng Zengkai <zhengzengkai@huawei.com>
上级 f92820e8
无相关合并请求
......@@ -966,6 +966,98 @@ static int arm_lpae_sync_dirty_log(struct io_pgtable_ops *ops,
bitmap, base_iova, bitmap_pgshift);
}
/*
 * Walk the page table rooted at @ptep and clear the dirty state of the
 * range [@iova, @iova + @size) by setting AP[2] (read-only) on leaf TTDs,
 * but only for leaves whose pages are ALL marked in @bitmap.
 *
 * @bitmap indexes pages of (1 << @bitmap_pgshift) bytes starting at
 * @base_iova. Returns 0 on success, -EINVAL on a malformed walk or when
 * a dirty leaf is smaller than the bitmap can express.
 */
static int __arm_lpae_clear_dirty_log(struct arm_lpae_io_pgtable *data,
				      unsigned long iova, size_t size,
				      int lvl, arm_lpae_iopte *ptep,
				      unsigned long *bitmap,
				      unsigned long base_iova,
				      unsigned long bitmap_pgshift)
{
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte cur;
	unsigned long first_bit;
	size_t child_size, off;
	int bit_cnt, err, bit;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return -EINVAL;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	cur = READ_ONCE(*ptep);
	if (WARN_ON(!cur))
		return -EINVAL;

	if (size != ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		if (iopte_leaf(cur, lvl, iop->fmt)) {
			/*
			 * Requested range is smaller than this leaf: only
			 * acceptable if the leaf is already clean (AP[2] set).
			 */
			return (cur & ARM_LPAE_PTE_AP_RDONLY) ? 0 : -EINVAL;
		}
		/* Table entry: keep descending towards the leaf level. */
		return __arm_lpae_clear_dirty_log(data, iova, size, lvl + 1,
						  iopte_deref(cur, data),
						  bitmap, base_iova,
						  bitmap_pgshift);
	}

	if (!iopte_leaf(cur, lvl, iop->fmt)) {
		/* Table at exactly this size: visit every child range. */
		child_size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);
		ptep = iopte_deref(cur, data);
		for (off = 0; off < size; off += child_size) {
			err = __arm_lpae_clear_dirty_log(data, iova + off,
							 child_size, lvl + 1,
							 ptep, bitmap,
							 base_iova,
							 bitmap_pgshift);
			if (err)
				return err;
		}
		return 0;
	}

	/* Leaf of exactly this size: nothing to do if already clean. */
	if (cur & ARM_LPAE_PTE_AP_RDONLY)
		return 0;

	/* Only clear when every page of the leaf is marked in the bitmap. */
	bit_cnt = size >> bitmap_pgshift;
	first_bit = (iova - base_iova) >> bitmap_pgshift;
	for (bit = first_bit; bit < first_bit + bit_cnt; bit++) {
		if (!test_bit(bit, bitmap))
			return 0;
	}

	/* Race does not exist */
	cur |= ARM_LPAE_PTE_AP_RDONLY;
	__arm_lpae_set_pte(ptep, cur, &iop->cfg);
	return 0;
}
/*
 * io_pgtable_ops::clear_dirty_log entry point: validate the request,
 * then walk the table and clear (set AP[2] on) the leaf TTDs covering
 * [@iova, @iova + @size) whose pages are all set in @bitmap.
 *
 * Only stage 1 LPAE formats are supported, since dirty tracking relies
 * on the stage 1 DBM/AP[2] mechanism. Returns 0 or -EINVAL.
 */
static int arm_lpae_clear_dirty_log(struct io_pgtable_ops *ops,
				    unsigned long iova, size_t size,
				    unsigned long *bitmap,
				    unsigned long base_iova,
				    unsigned long bitmap_pgshift)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *pgd = data->pgd;
	int start_lvl = data->start_level;
	long upper_bits = (s64)iova >> cfg->ias;

	/* Size must be non-zero and one of the supported page sizes. */
	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	/* Bits above the IAS must be all-zero (TTBR0) or all-one (TTBR1). */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		upper_bits = ~upper_bits;
	if (WARN_ON(upper_bits))
		return -EINVAL;

	switch (data->iop.fmt) {
	case ARM_64_LPAE_S1:
	case ARM_32_LPAE_S1:
		break;
	default:
		/* Dirty tracking is a stage 1 (DBM/AP[2]) feature only. */
		return -EINVAL;
	}

	return __arm_lpae_clear_dirty_log(data, iova, size, start_lvl, pgd,
					  bitmap, base_iova, bitmap_pgshift);
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
unsigned long granule, page_sizes;
......@@ -1047,6 +1139,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.split_block = arm_lpae_split_block,
.merge_page = arm_lpae_merge_page,
.sync_dirty_log = arm_lpae_sync_dirty_log,
.clear_dirty_log = arm_lpae_clear_dirty_log,
};
return data;
......
......@@ -175,6 +175,10 @@ struct io_pgtable_ops {
unsigned long iova, size_t size,
unsigned long *bitmap, unsigned long base_iova,
unsigned long bitmap_pgshift);
int (*clear_dirty_log)(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long *bitmap, unsigned long base_iova,
unsigned long bitmap_pgshift);
};
/**
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册
反馈
建议
客服 返回
顶部