diff --git a/drivers/char/svm.c b/drivers/char/svm.c
index 053582aa59444e7df97da9873bf24f4d86c4cba3..7f9d854e15f94d03999288cb589ac8b86ccb95b5 100644
--- a/drivers/char/svm.c
+++ b/drivers/char/svm.c
@@ -48,6 +48,9 @@
 
 #define SVM_REMAP_MEM_LEN_MAX (16 * 1024 * 1024)
 
+#define SVM_IOCTL_RELEASE_PHYS32 0xfff3
+#define MMAP_PHY32_MAX (16 * 1024 * 1024)
+
 #define CORE_SID 0
 static int probe_index;
 static LIST_HEAD(child_list);
@@ -146,6 +149,8 @@ static char *svm_cmd_to_string(unsigned int cmd)
 		return "remap proc";
 	case SVM_IOCTL_LOAD_FLAG:
 		return "load flag";
+	case SVM_IOCTL_RELEASE_PHYS32:
+		return "release phys";
 	default:
 		return "unsupported";
 	}
@@ -1483,12 +1488,6 @@ static unsigned long svm_get_unmapped_area(struct file *file,
 	if (!acpi_disabled)
 		return -EPERM;
 
-	if (len != sdev->l2size) {
-		dev_err(sdev->dev, "Just map the size of L2BUFF %ld\n",
-			sdev->l2size);
-		return -EINVAL; //lint !e570
-	}
-
 	if (flags & MAP_FIXED) {
 		if (IS_ALIGNED(addr, len))
 			return addr;
@@ -1544,23 +1543,96 @@ static int svm_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!acpi_disabled)
 		return -EPERM;
 
-	if ((vma->vm_end < vma->vm_start) ||
-	    ((vma->vm_end - vma->vm_start) > sdev->l2size))
-		return -EINVAL;
+	if (vma->vm_flags & VM_PA32BIT) {
+		unsigned long vm_size = vma->vm_end - vma->vm_start;
+		struct page *page = NULL;
 
-	vma->vm_page_prot = __pgprot((~PTE_SHARED) & vma->vm_page_prot.pgprot);
+		if ((vma->vm_end < vma->vm_start) || (vm_size > MMAP_PHY32_MAX))
+			return -EINVAL;
 
-	err = remap_pfn_range(vma, vma->vm_start, sdev->l2buff >> PAGE_SHIFT,
-			vma->vm_end - vma->vm_start,
-			__pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY));
+		/*
+		 * __GFP_ZERO: these pages are mapped straight into userspace,
+		 * so they must not carry stale kernel data.
+		 */
+		page = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO,
+				get_order(vm_size));
+		if (!page) {
+			dev_err(sdev->dev, "fail to alloc page\n");
+			return -ENOMEM;
+		}
 
-	if (err)
-		dev_err(sdev->dev, "fail to remap 0x%pK err = %d\n",
-				(void *)vma->vm_start, err);
+		err = remap_pfn_range(vma,
+				vma->vm_start,
+				page_to_pfn(page),
+				vm_size, vma->vm_page_prot);
+		if (err) {
+			/* don't leak the pages when the mapping fails */
+			__free_pages(page, get_order(vm_size));
+			dev_err(sdev->dev,
+				"fail to remap 0x%pK err=%d\n",
+				(void *)vma->vm_start, err);
+		}
+	} else {
+		if ((vma->vm_end < vma->vm_start) ||
+		    ((vma->vm_end - vma->vm_start) > sdev->l2size))
+			return -EINVAL;
+
+		vma->vm_page_prot = __pgprot((~PTE_SHARED) &
+				vma->vm_page_prot.pgprot);
+
+		err = remap_pfn_range(vma,
+				vma->vm_start,
+				sdev->l2buff >> PAGE_SHIFT,
+				vma->vm_end - vma->vm_start,
+				__pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY));
+		if (err)
+			dev_err(sdev->dev,
+				"fail to remap 0x%pK err=%d\n",
+				(void *)vma->vm_start, err);
+	}
 
 	return err;
 }
 
+static int svm_release_phys32(unsigned long __user *arg)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = NULL;
+	struct page *page = NULL;
+	pte_t *pte = NULL;
+	unsigned long phys, addr, offset;
+	unsigned int len = 0;
+
+	if (arg == NULL)
+		return -EINVAL;
+
+	if (get_user(addr, arg))
+		return -EFAULT;
+
+	pte = svm_walk_pt(addr, NULL, &offset);
+	if (pte && pte_present(*pte))
+		phys = PFN_PHYS(pte_pfn(*pte)) + offset;
+	else
+		return -EINVAL;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, addr);
+	/* only pages handed out by svm_mmap(VM_PA32BIT) may be released */
+	if (!vma || !(vma->vm_flags & VM_PA32BIT)) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	}
+
+	page = phys_to_page(phys);
+	len = vma->vm_end - vma->vm_start;
+
+	__free_pages(page, get_order(len));
+
+	up_read(&mm->mmap_sem);
+
+	return 0;
+}
+
 /*svm ioctl will include some case for HI1980 and HI1910*/
 static long svm_ioctl(struct file *file, unsigned int cmd,
 			unsigned long arg)
@@ -1630,6 +1702,9 @@ static long svm_ioctl(struct file *file, unsigned int cmd,
 	case SVM_IOCTL_LOAD_FLAG:
 		err = svm_proc_load_flag((int __user *)arg);
 		break;
+	case SVM_IOCTL_RELEASE_PHYS32:
+		err = svm_release_phys32((unsigned long __user *)arg);
+		break;
 	default:
 		err = -EINVAL;
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e173a4d9cec48c6d0757b054fb43319b44fa688..7a59dba0024eae11a27f98f2df72c0d9d3622497 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -219,6 +219,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
+#define VM_PA32BIT	0x400000000	/* Physical address is within 4G */
 
 #ifdef CONFIG_COHERENT_DEVICE
 #define VM_CDM		0x100000000	/* Contains coherent device memory */
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 653687d9771b9d0e824fe4b25eee079f7fcec1cc..1915bcc107ebb70d4ac3f63985ab8f129b55e999 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -14,6 +14,7 @@
 #define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x40000		/* create a huge page mapping */
 #define MAP_SYNC	0x80000		/* perform synchronous page faults for the mapping */
+#define MAP_PA32BIT	0x400000	/* physical address is within 4G */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 4d149cf8d5910317ca6630bb0add9f472e86809c..c1034012aeaa08be4ed8181fde74cc0f4b03e8f1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1415,6 +1415,10 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 		pkey = 0;
 	}
 
+	/* Physical address is within 4G */
+	if (flags & MAP_PA32BIT)
+		vm_flags |= VM_PA32BIT;
+
 	/* Do simple checking here so the lower-level routines won't have
 	 * to. we assume access permissions have been handled by the open
 	 * of the memory object, so we don't do any here.