Commit db6c6774 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/umem: Remove hugetlb flag

The drivers i40iw and bnxt_re no longer depend on the hugetlb flag, so
remove this flag from the ib_umem structure.
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Parent d8558251
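With the flag gone, a driver that wants large-page mappings must derive the page size from the pinned memory itself rather than trust a single core-managed bit. Below is a minimal sketch of that idea; the helper name is hypothetical and not part of this commit (later kernels add ib_umem_find_best_pgsz() for this purpose):

/*
 * Hypothetical sketch, not from this commit: pick the largest page
 * size (from a device-supplied bitmap of supported sizes) that keeps
 * every scatterlist segment of the pinned region aligned.
 */
static unsigned long my_umem_best_page_size(struct ib_umem *umem,
					    unsigned long pgsz_bitmap)
{
	struct scatterlist *sg;
	unsigned long mask = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
		mask |= sg_dma_address(sg) | sg_dma_len(sg);

	if (!mask)
		return 0;

	/* Page sizes above the lowest set bit of mask would misalign
	 * some segment; keep only candidates at or below it. */
	pgsz_bitmap &= GENMASK(__ffs(mask), 0);
	return pgsz_bitmap ? BIT(__fls(pgsz_bitmap)) : 0;
}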
@@ -37,7 +37,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/export.h>
-#include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <rdma/ib_umem_odp.h>
@@ -199,14 +198,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	struct ib_ucontext *context;
 	struct ib_umem *umem;
 	struct page **page_list;
-	struct vm_area_struct **vma_list;
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
-	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
@@ -264,23 +261,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		return umem;
 	}
 
-	/* We assume the memory is from hugetlb until proved otherwise */
-	umem->hugetlb = 1;
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		ret = -ENOMEM;
 		goto umem_kfree;
 	}
 
-	/*
-	 * if we can't alloc the vma_list, it's not so bad;
-	 * just assume the memory is not hugetlb memory
-	 */
-	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
-	if (!vma_list)
-		umem->hugetlb = 0;
-
 	npages = ib_umem_num_pages(umem);
 	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
@@ -312,7 +298,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		ret = get_user_pages_longterm(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     gup_flags, page_list, vma_list);
+				     gup_flags, page_list, NULL);
 		if (ret < 0) {
 			up_read(&mm->mmap_sem);
 			goto umem_release;
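The last parameter of get_user_pages_longterm() is an optional output array for the backing VMAs; with the hugetlb scan removed in the next hunk there is no consumer left, so the call now passes NULL. For reference, the helper's signature in this era of the kernel looked roughly as follows (it was later folded into the FOLL_LONGTERM gup flag, so treat this as indicative):

/* mm/gup.c, roughly as of this series: the vmas array may be NULL
 * when the caller does not need the VMA for each pinned page. */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			     unsigned int gup_flags, struct page **pages,
			     struct vm_area_struct **vmas);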
@@ -325,14 +311,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 				dma_get_max_seg_size(context->device->dma_device),
 				&umem->sg_nents);
 
-		/* Continue to hold the mmap_sem as vma_list access
-		 * needs to be protected.
-		 */
-		for (i = 0; i < ret && umem->hugetlb; i++) {
-			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
-				umem->hugetlb = 0;
-		}
-
 		up_read(&mm->mmap_sem);
 	}
 
@@ -357,8 +335,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 vma:
 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
-	if (vma_list)
-		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 umem_kfree:
 	if (ret) {
......
@@ -417,9 +417,6 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
 		up_read(&mm->mmap_sem);
-		umem->hugetlb = 1;
-	} else {
-		umem->hugetlb = 0;
 	}
 
 	mutex_init(&umem_odp->umem_mutex);
......
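Note that the ODP path above still honours huge pages and still records the huge page size in umem->page_shift; only the redundant flag writes go away. Any code that needs to distinguish a huge-page-backed ODP umem after this change can recover that information from the page shift, for example (hypothetical helper, not from this commit):

static inline bool my_odp_umem_is_huge(const struct ib_umem *umem)
{
	/* page_shift stays PAGE_SHIFT unless the ODP hugetlb path
	 * above promoted it via huge_page_shift(). */
	return umem->page_shift > PAGE_SHIFT;
}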
@@ -48,7 +48,6 @@ struct ib_umem {
 	unsigned long address;
 	int page_shift;
 	u32 writable : 1;
-	u32 hugetlb : 1;
 	u32 is_odp : 1;
 	struct work_struct work;
 	struct sg_table sg_head;
......
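One side note on the structure change: the remaining one-bit fields still pack into a single 32-bit word, so dropping hugetlb shifts is_odp down by one bit but does not change sizeof(struct ib_umem). A standalone illustration of that bitfield behaviour (plain C, struct names invented for the example):

#include <assert.h>
#include <stdint.h>

struct flags_before { uint32_t writable : 1; uint32_t hugetlb : 1; uint32_t is_odp : 1; };
struct flags_after  { uint32_t writable : 1; uint32_t is_odp : 1; };

int main(void)
{
	/* Both variants occupy one 32-bit storage unit on common ABIs. */
	assert(sizeof(struct flags_before) == sizeof(struct flags_after));
	return 0;
}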