Commit 221302d3 authored by Bixuan Cui, committed by Yang Yingliang

svm: add support for allocating memory with a physical address within 4G in svm_mmap

ascend inclusion
category: feature
bugzilla: NA
CVE: NA

-------------------------------------------------

Add memory allocation and release functions to svm. The physical address
of the allocated memory is within 4GB.

For example:
    /* alloc */
    fd = open("dev/svm0",);
    mmap(0, ALLOC_SIZE,, MAP_PA32BIT, fd, 0);

    /* free */
    ioctl(fd, SVM_IOCTL_RELEASE_PHYS32,);
    close(fd);
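
A more complete userspace sketch of the same flow follows. The example
above elides the open flags, the mmap protection bits, and the ioctl
argument, so the values used here are assumptions for illustration: the
device path is taken as /dev/svm0, the mapping as read/write and shared,
and the ioctl argument as a pointer to the mapped address (matching
svm_release_phys32() reading an unsigned long via get_user()).

    /*
     * Hypothetical userspace sketch; /dev/svm0, O_RDWR and
     * PROT_READ | PROT_WRITE | MAP_SHARED are assumed, not taken
     * from the patch.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define ALLOC_SIZE (4 * 1024 * 1024)     /* must not exceed MMAP_PHY32_MAX */
    #ifndef MAP_PA32BIT
    #define MAP_PA32BIT 0x400000             /* from this patch */
    #endif
    #define SVM_IOCTL_RELEASE_PHYS32 0xfff3  /* from this patch */

    int main(void)
    {
        int fd = open("/dev/svm0", O_RDWR);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* alloc: MAP_PA32BIT asks svm_mmap() for memory below 4G */
        void *buf = mmap(NULL, ALLOC_SIZE, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_PA32BIT, fd, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            close(fd);
            return 1;
        }

        /* ... use buf ... */

        /* free: the ioctl takes a pointer to the mapped address */
        unsigned long addr = (unsigned long)buf;
        if (ioctl(fd, SVM_IOCTL_RELEASE_PHYS32, &addr))
            perror("ioctl");

        munmap(buf, ALLOC_SIZE);
        close(fd);
        return 0;
    }
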
Signed-off-by: Bixuan Cui <cuibixuan@huawei.com>
Reviewed-by: Zefan Li <lizefan@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent ae336d6f
@@ -48,6 +48,9 @@
 #define SVM_REMAP_MEM_LEN_MAX (16 * 1024 * 1024)
 
+#define SVM_IOCTL_RELEASE_PHYS32 0xfff3
+#define MMAP_PHY32_MAX (16 * 1024 * 1024)
+
 #define CORE_SID 0
 
 static int probe_index;
 static LIST_HEAD(child_list);
@@ -146,6 +149,8 @@ static char *svm_cmd_to_string(unsigned int cmd)
                return "remap proc";
        case SVM_IOCTL_LOAD_FLAG:
                return "load flag";
+       case SVM_IOCTL_RELEASE_PHYS32:
+               return "release phys";
        default:
                return "unsupported";
        }
@@ -1483,12 +1488,6 @@ static unsigned long svm_get_unmapped_area(struct file *file,
        if (!acpi_disabled)
                return -EPERM;
 
-       if (len != sdev->l2size) {
-               dev_err(sdev->dev, "Just map the size of L2BUFF %ld\n",
-                               sdev->l2size);
-               return -EINVAL; //lint !e570
-       }
-
        if (flags & MAP_FIXED) {
                if (IS_ALIGNED(addr, len))
                        return addr;
@@ -1544,23 +1543,87 @@ static int svm_mmap(struct file *file, struct vm_area_struct *vma)
        if (!acpi_disabled)
                return -EPERM;
 
-       if ((vma->vm_end < vma->vm_start) ||
-           ((vma->vm_end - vma->vm_start) > sdev->l2size))
-               return -EINVAL;
-
-       vma->vm_page_prot = __pgprot((~PTE_SHARED) & vma->vm_page_prot.pgprot);
-
-       err = remap_pfn_range(vma, vma->vm_start, sdev->l2buff >> PAGE_SHIFT,
-                       vma->vm_end - vma->vm_start,
-                       __pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY));
-
-       if (err)
-               dev_err(sdev->dev, "fail to remap 0x%pK err = %d\n",
-                               (void *)vma->vm_start, err);
+       if (vma->vm_flags & VM_PA32BIT) {
+               unsigned long vm_size = vma->vm_end - vma->vm_start;
+               struct page *page = NULL;
+
+               if ((vma->vm_end < vma->vm_start) || (vm_size > MMAP_PHY32_MAX))
+                       return -EINVAL;
+
+               page = alloc_pages(GFP_KERNEL | GFP_DMA32, get_order(vm_size));
+               if (!page) {
+                       dev_err(sdev->dev, "fail to alloc page\n");
+                       return -ENOMEM;
+               }
+
+               err = remap_pfn_range(vma,
+                               vma->vm_start,
+                               page_to_pfn(page),
+                               vm_size, vma->vm_page_prot);
+               if (err)
+                       dev_err(sdev->dev,
+                               "fail to remap 0x%pK err=%d\n",
+                               (void *)vma->vm_start, err);
+       } else {
+               if ((vma->vm_end < vma->vm_start) ||
+                   ((vma->vm_end - vma->vm_start) > sdev->l2size))
+                       return -EINVAL;
+
+               vma->vm_page_prot = __pgprot((~PTE_SHARED) &
+                                       vma->vm_page_prot.pgprot);
+
+               err = remap_pfn_range(vma,
+                               vma->vm_start,
+                               sdev->l2buff >> PAGE_SHIFT,
+                               vma->vm_end - vma->vm_start,
+                               __pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY));
+               if (err)
+                       dev_err(sdev->dev,
+                               "fail to remap 0x%pK err=%d\n",
+                               (void *)vma->vm_start, err);
+       }
 
        return err;
 }
 
+static int svm_release_phys32(unsigned long __user *arg)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma = NULL;
+       struct page *page = NULL;
+       pte_t *pte = NULL;
+       unsigned long phys, addr, offset;
+       unsigned int len = 0;
+
+       if (arg == NULL)
+               return -EINVAL;
+
+       if (get_user(addr, arg))
+               return -EFAULT;
+
+       pte = svm_walk_pt(addr, NULL, &offset);
+       if (pte && pte_present(*pte))
+               phys = PFN_PHYS(pte_pfn(*pte)) + offset;
+       else
+               return -EINVAL;
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, addr);
+       if (!vma) {
+               up_read(&mm->mmap_sem);
+               return -EFAULT;
+       }
+
+       page = phys_to_page(phys);
+       len = vma->vm_end - vma->vm_start;
+
+       __free_pages(page, get_order(len));
+
+       up_read(&mm->mmap_sem);
+
+       return 0;
+}
+
 /*svm ioctl will include some case for HI1980 and HI1910*/
 static long svm_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
@@ -1630,6 +1693,9 @@ static long svm_ioctl(struct file *file, unsigned int cmd,
        case SVM_IOCTL_LOAD_FLAG:
                err = svm_proc_load_flag((int __user *)arg);
                break;
+       case SVM_IOCTL_RELEASE_PHYS32:
+               err = svm_release_phys32((unsigned long __user *)arg);
+               break;
        default:
                err = -EINVAL;
        }
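
As a reading aid, the VM_PA32BIT path added to svm_mmap() above boils down
to the condensed, commented sketch below. The helper name is hypothetical
and the device-specific dev_err() logging from the patch is omitted;
GFP_DMA32 keeps the physical address below 4G, and get_order() rounds the
requested size up to a power-of-two number of pages.

/*
 * Condensed sketch of the VM_PA32BIT branch of svm_mmap() above
 * (hypothetical helper; not part of the patch itself).
 */
#include <linux/gfp.h>
#include <linux/mm.h>

#define MMAP_PHY32_MAX (16 * 1024 * 1024)	/* same limit as the patch */

static int pa32bit_mmap_sketch(struct vm_area_struct *vma)
{
	unsigned long vm_size = vma->vm_end - vma->vm_start;
	struct page *page;

	if (vma->vm_end < vma->vm_start || vm_size > MMAP_PHY32_MAX)
		return -EINVAL;

	/*
	 * GFP_DMA32 restricts the allocation to physical addresses below
	 * 4G; alloc_pages() returns 2^order contiguous pages, so
	 * get_order() rounds vm_size up to the next power-of-two number
	 * of pages.
	 */
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, get_order(vm_size));
	if (!page)
		return -ENOMEM;

	/* Map the physically contiguous block into the caller's VMA. */
	return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page),
			       vm_size, vma->vm_page_prot);
}
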
......
@@ -219,6 +219,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+#define VM_PA32BIT 0x400000000 /* Physical address is within 4G */
 
 #ifdef CONFIG_COHERENT_DEVICE
 #define VM_CDM 0x100000000 /* Contains coherent device memory */
......
@@ -14,6 +14,7 @@
 #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB 0x40000 /* create a huge page mapping */
 #define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
+#define MAP_PA32BIT 0x400000 /* physical address is within 4G */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
......
@@ -1415,6 +1415,10 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                pkey = 0;
        }
 
+       /* Physical address is within 4G */
+       if (flags & MAP_PA32BIT)
+               vm_flags |= VM_PA32BIT;
+
        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
......