Commit 022b7bdf authored by Zhou Guanghui, committed by Zheng Zengkai

mm/sharepool: Support read-only memory allocation

ascend inclusion
category: Bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5DS9S
CVE: NA

--------------------------------

When a driver uses share pool memory to share data with user space,
user space must not be allowed to modify that area. This prevents
users from damaging sensitive data.

When sp_alloc or the k2u path applies for private memory, the new
SP_PROT_RO flag allows read-only memory to be requested.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 5815e08a
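For context, the sketch below shows how a caller might request a read-only
share-pool allocation once SP_PROT_RO exists. The sp_alloc() prototype and
SPG_ID_DEFAULT are assumptions drawn from the wider openEuler share-pool
interface; neither appears in this diff.

#include <linux/err.h>
#include <linux/share_pool.h>   /* assumed home of sp_alloc() and SP_PROT_RO */

/*
 * Sketch only: allocate share-pool memory that user space may read
 * but never write. sp_alloc(size, sp_flags, spg_id) and SPG_ID_DEFAULT
 * are assumptions based on the broader share-pool API, not this diff.
 */
static void *alloc_ro_shared(unsigned long size)
{
        void *buf = sp_alloc(size, SP_PROT_RO, SPG_ID_DEFAULT);

        if (IS_ERR(buf))
                return NULL;

        /*
         * The user-space mapping is created with PROT_READ and has
         * VM_MAYWRITE cleared, so user space can neither write to the
         * area nor upgrade it later with mprotect(PROT_WRITE).
         */
        return buf;
}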
@@ -15,6 +15,7 @@
 #define SP_HUGEPAGE_ONLY        (1 << 1)
 #define SP_DVPP                 (1 << 2)
 #define SP_SPEC_NODE_ID         (1 << 3)
+#define SP_PROT_RO              (1 << 16)
 
 #define DEVICE_ID_BITS          4UL
 #define DEVICE_ID_MASK          ((1UL << DEVICE_ID_BITS) - 1UL)
@@ -24,7 +25,7 @@
 #define NODE_ID_SHIFT           (DEVICE_ID_SHIFT + DEVICE_ID_BITS)
 
 #define SP_FLAG_MASK            (SP_HUGEPAGE | SP_HUGEPAGE_ONLY | SP_DVPP | \
-                                 SP_SPEC_NODE_ID | \
+                                 SP_SPEC_NODE_ID | SP_PROT_RO | \
                                  (DEVICE_ID_MASK << DEVICE_ID_SHIFT) | \
                                  (NODE_ID_MASK << NODE_ID_SHIFT))
@@ -2325,6 +2325,9 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
         if (spg_node)
                 prot = spg_node->prot;
 
+        if (ac->sp_flags & SP_PROT_RO)
+                prot = PROT_READ;
+
         /* when success, mmap_addr == spa->va_start */
         mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot);
         if (IS_ERR_VALUE(mmap_addr)) {
@@ -2349,6 +2352,10 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
                 ret = -EINVAL;
                 goto unmap;
         }
+
+        if (ac->sp_flags & SP_PROT_RO)
+                vma->vm_flags &= ~VM_MAYWRITE;
+
         /* clean PTE_RDONLY flags or trigger SMMU event */
         if (prot & PROT_WRITE)
                 vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
@@ -2644,6 +2651,9 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
                 goto put_mm;
         }
 
+        if (kc && kc->sp_flags & SP_PROT_RO)
+                prot = PROT_READ;
+
         ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot);
         if (IS_ERR_VALUE(ret_addr)) {
                 pr_debug("k2u mmap failed %lx\n", ret_addr);
@@ -2656,6 +2666,9 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
         if (prot & PROT_WRITE)
                 vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
 
+        if (kc && kc->sp_flags & SP_PROT_RO)
+                vma->vm_flags &= ~VM_MAYWRITE;
+
         if (is_vm_hugetlb_page(vma)) {
                 ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
                 if (ret) {
@@ -2707,6 +2720,7 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un
         struct sp_area *spa;
         struct spg_proc_stat *stat;
         unsigned long prot = PROT_READ | PROT_WRITE;
+        struct sp_k2u_context kc;
 
         down_write(&sp_group_sem);
         stat = sp_init_process_stat(current, current->mm, spg_none);
@@ -2725,8 +2739,8 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un
         }
 
         spa->kva = kva;
-        uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot, NULL);
+        kc.sp_flags = sp_flags;
+        uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot, &kc);
         __sp_area_drop(spa);
         if (IS_ERR(uva))
                 pr_err("remap k2u to task failed %ld\n", PTR_ERR(uva));
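Why clear VM_MAYWRITE on top of mapping with PROT_READ? Once VM_MAYWRITE is
gone from the vma, the kernel refuses any later mprotect(PROT_WRITE) upgrade,
so the read-only property is permanent rather than just an initial protection.
The same rule can be observed from user space with an ordinary read-only
shared file mapping; a minimal, self-contained demonstration follows (the
file name is arbitrary, any readable file works):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Any existing readable file works here. */
        int fd = open("/etc/hostname", O_RDONLY);
        void *p;

        if (fd < 0)
                return 1;

        /* MAP_SHARED of an O_RDONLY fd leaves VM_MAYWRITE clear. */
        p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /*
         * Fails with EACCES: once VM_MAYWRITE is clear, mprotect()
         * cannot grant PROT_WRITE -- the same guarantee SP_PROT_RO
         * relies on for share-pool mappings.
         */
        if (mprotect(p, 4096, PROT_READ | PROT_WRITE) != 0)
                perror("mprotect");

        munmap(p, 4096);
        close(fd);
        return 0;
}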