“30514decb27d45b98599612cb5d3e6a20ba733a5”上不存在“drivers/gpu/drm/scheduler/gpu_scheduler.c”
提交 53b2d09b 编写于 作者: Jason Gunthorpe 提交者: Andrew Morton

mm/gup: remove the restriction on locked with FOLL_LONGTERM

This restriction was created because FOLL_LONGTERM used to scan the vma
list, so it could not tolerate becoming unlocked.  That was fixed in
commit 52650c8b ("mm/gup: remove the vma allocation from
gup_longterm_locked()") and the restriction on !vma was removed.

However, the locked restriction remained, even though it isn't necessary
anymore.

Adjust __gup_longterm_locked() so it can handle the mmap_read_lock()
becoming unlocked while it is looping for migration.  Migration does not
require the mmap_read_sem because it is only handling struct pages.  If we
had to unlock then ensure the whole thing returns unlocked.

Remove __get_user_pages_remote() and __gup_longterm_unlocked().  These
cases can now just directly call other functions.

Link: https://lkml.kernel.org/r/0-v1-b9ae39aa8884+14dbb-gup_longterm_locked_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
上级 eff6aa17
...@@ -2049,14 +2049,19 @@ static long __gup_longterm_locked(struct mm_struct *mm, ...@@ -2049,14 +2049,19 @@ static long __gup_longterm_locked(struct mm_struct *mm,
unsigned long nr_pages, unsigned long nr_pages,
struct page **pages, struct page **pages,
struct vm_area_struct **vmas, struct vm_area_struct **vmas,
int *locked,
unsigned int gup_flags) unsigned int gup_flags)
{ {
bool must_unlock = false;
unsigned int flags; unsigned int flags;
long rc, nr_pinned_pages; long rc, nr_pinned_pages;
if (locked && WARN_ON_ONCE(!*locked))
return -EINVAL;
if (!(gup_flags & FOLL_LONGTERM)) if (!(gup_flags & FOLL_LONGTERM))
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
NULL, gup_flags); locked, gup_flags);
/* /*
* If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM
...@@ -2070,8 +2075,13 @@ static long __gup_longterm_locked(struct mm_struct *mm, ...@@ -2070,8 +2075,13 @@ static long __gup_longterm_locked(struct mm_struct *mm,
return -EINVAL; return -EINVAL;
flags = memalloc_pin_save(); flags = memalloc_pin_save();
do { do {
if (locked && !*locked) {
mmap_read_lock(mm);
must_unlock = true;
*locked = 1;
}
nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
pages, vmas, NULL, pages, vmas, locked,
gup_flags); gup_flags);
if (nr_pinned_pages <= 0) { if (nr_pinned_pages <= 0) {
rc = nr_pinned_pages; rc = nr_pinned_pages;
...@@ -2081,6 +2091,10 @@ static long __gup_longterm_locked(struct mm_struct *mm, ...@@ -2081,6 +2091,10 @@ static long __gup_longterm_locked(struct mm_struct *mm,
} while (rc == -EAGAIN); } while (rc == -EAGAIN);
memalloc_pin_restore(flags); memalloc_pin_restore(flags);
if (locked && *locked && must_unlock) {
mmap_read_unlock(mm);
*locked = 0;
}
return rc ? rc : nr_pinned_pages; return rc ? rc : nr_pinned_pages;
} }
...@@ -2104,35 +2118,6 @@ static bool is_valid_gup_flags(unsigned int gup_flags) ...@@ -2104,35 +2118,6 @@ static bool is_valid_gup_flags(unsigned int gup_flags)
} }
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
static long __get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
/*
* Parts of FOLL_LONGTERM behavior are incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
* vmas. However, this only comes up if locked is set, and there are
* callers that do request FOLL_LONGTERM, but do not set locked. So,
* allow what we can.
*/
if (gup_flags & FOLL_LONGTERM) {
if (WARN_ON_ONCE(locked))
return -EINVAL;
/*
* This will check the vmas (even if our vmas arg is NULL)
* and return -ENOTSUPP if DAX isn't allowed in this case:
*/
return __gup_longterm_locked(mm, start, nr_pages, pages,
vmas, gup_flags | FOLL_TOUCH |
FOLL_REMOTE);
}
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
/** /**
* get_user_pages_remote() - pin user pages in memory * get_user_pages_remote() - pin user pages in memory
* @mm: mm_struct of target mm * @mm: mm_struct of target mm
...@@ -2201,8 +2186,8 @@ long get_user_pages_remote(struct mm_struct *mm, ...@@ -2201,8 +2186,8 @@ long get_user_pages_remote(struct mm_struct *mm,
if (!is_valid_gup_flags(gup_flags)) if (!is_valid_gup_flags(gup_flags))
return -EINVAL; return -EINVAL;
return __get_user_pages_remote(mm, start, nr_pages, gup_flags, return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
pages, vmas, locked); gup_flags | FOLL_TOUCH | FOLL_REMOTE);
} }
EXPORT_SYMBOL(get_user_pages_remote); EXPORT_SYMBOL(get_user_pages_remote);
...@@ -2214,14 +2199,6 @@ long get_user_pages_remote(struct mm_struct *mm, ...@@ -2214,14 +2199,6 @@ long get_user_pages_remote(struct mm_struct *mm,
{ {
return 0; return 0;
} }
static long __get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
return 0;
}
#endif /* !CONFIG_MMU */ #endif /* !CONFIG_MMU */
/** /**
...@@ -2248,7 +2225,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, ...@@ -2248,7 +2225,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
return -EINVAL; return -EINVAL;
return __gup_longterm_locked(current->mm, start, nr_pages, return __gup_longterm_locked(current->mm, start, nr_pages,
pages, vmas, gup_flags | FOLL_TOUCH); pages, vmas, NULL, gup_flags | FOLL_TOUCH);
} }
EXPORT_SYMBOL(get_user_pages); EXPORT_SYMBOL(get_user_pages);
...@@ -2274,18 +2251,9 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, ...@@ -2274,18 +2251,9 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int locked = 1; int locked = 1;
long ret; long ret;
/*
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
* vmas. As there are no users of this flag in this call we simply
* disallow this option for now.
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
mmap_read_lock(mm); mmap_read_lock(mm);
ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, ret = __gup_longterm_locked(mm, start, nr_pages, pages, NULL, &locked,
&locked, gup_flags | FOLL_TOUCH); gup_flags | FOLL_TOUCH);
if (locked) if (locked)
mmap_read_unlock(mm); mmap_read_unlock(mm);
return ret; return ret;
...@@ -2879,29 +2847,6 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end) ...@@ -2879,29 +2847,6 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
} }
#endif #endif
static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
int ret;
/*
* FIXME: FOLL_LONGTERM does not work with
* get_user_pages_unlocked() (see comments in that function)
*/
if (gup_flags & FOLL_LONGTERM) {
mmap_read_lock(current->mm);
ret = __gup_longterm_locked(current->mm,
start, nr_pages,
pages, NULL, gup_flags);
mmap_read_unlock(current->mm);
} else {
ret = get_user_pages_unlocked(start, nr_pages,
pages, gup_flags);
}
return ret;
}
static unsigned long lockless_pages_from_mm(unsigned long start, static unsigned long lockless_pages_from_mm(unsigned long start,
unsigned long end, unsigned long end,
unsigned int gup_flags, unsigned int gup_flags,
...@@ -2985,8 +2930,8 @@ static int internal_get_user_pages_fast(unsigned long start, ...@@ -2985,8 +2930,8 @@ static int internal_get_user_pages_fast(unsigned long start,
/* Slow path: try to get the remaining pages with get_user_pages */ /* Slow path: try to get the remaining pages with get_user_pages */
start += nr_pinned << PAGE_SHIFT; start += nr_pinned << PAGE_SHIFT;
pages += nr_pinned; pages += nr_pinned;
ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags, ret = get_user_pages_unlocked(start, nr_pages - nr_pinned, pages,
pages); gup_flags);
if (ret < 0) { if (ret < 0) {
/* /*
* The caller has to unpin the pages we already pinned so * The caller has to unpin the pages we already pinned so
...@@ -3185,9 +3130,9 @@ long pin_user_pages_remote(struct mm_struct *mm, ...@@ -3185,9 +3130,9 @@ long pin_user_pages_remote(struct mm_struct *mm,
if (WARN_ON_ONCE(!pages)) if (WARN_ON_ONCE(!pages))
return -EINVAL; return -EINVAL;
gup_flags |= FOLL_PIN; return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
return __get_user_pages_remote(mm, start, nr_pages, gup_flags, gup_flags | FOLL_PIN | FOLL_TOUCH |
pages, vmas, locked); FOLL_REMOTE);
} }
EXPORT_SYMBOL(pin_user_pages_remote); EXPORT_SYMBOL(pin_user_pages_remote);
...@@ -3221,7 +3166,7 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, ...@@ -3221,7 +3166,7 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
gup_flags |= FOLL_PIN; gup_flags |= FOLL_PIN;
return __gup_longterm_locked(current->mm, start, nr_pages, return __gup_longterm_locked(current->mm, start, nr_pages,
pages, vmas, gup_flags); pages, vmas, NULL, gup_flags);
} }
EXPORT_SYMBOL(pin_user_pages); EXPORT_SYMBOL(pin_user_pages);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册