Commit ce53053c authored by Al Viro

kvm: switch get_user_page_nowait() to get_user_pages_unlocked()

... and fold into the sole caller, unifying async and non-async cases
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent e716712f
@@ -1314,17 +1314,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(unsigned long start, int write,
-		struct page **page)
-{
-	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
-	if (write)
-		flags |= FOLL_WRITE;
-
-	return get_user_pages(start, 1, flags, page, NULL);
-}
-
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1373,7 +1362,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 			   bool *writable, kvm_pfn_t *pfn)
 {
-	struct page *page[1];
+	unsigned int flags = FOLL_HWPOISON;
+	struct page *page;
 	int npages = 0;
 
 	might_sleep();
@@ -1381,35 +1371,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 	if (writable)
 		*writable = write_fault;
 
-	if (async) {
-		down_read(&current->mm->mmap_sem);
-		npages = get_user_page_nowait(addr, write_fault, page);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		unsigned int flags = FOLL_HWPOISON;
-
-		if (write_fault)
-			flags |= FOLL_WRITE;
+	if (write_fault)
+		flags |= FOLL_WRITE;
+	if (async)
+		flags |= FOLL_NOWAIT;
 
-		npages = get_user_pages_unlocked(addr, 1, page, flags);
-	}
+	npages = get_user_pages_unlocked(addr, 1, &page, flags);
 	if (npages != 1)
 		return npages;
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && writable) {
-		struct page *wpage[1];
+		struct page *wpage;
 
-		npages = __get_user_pages_fast(addr, 1, 1, wpage);
-		if (npages == 1) {
+		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
 			*writable = true;
-			put_page(page[0]);
-			page[0] = wpage[0];
+			put_page(page);
+			page = wpage;
 		}
-		npages = 1;
 	}
-	*pfn = page_to_pfn(page[0]);
+	*pfn = page_to_pfn(page);
 	return npages;
 }
......
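
The net effect of the unification is easier to see in isolation: where the old code kept two paths (a hand-rolled get_user_page_nowait() under mmap_sem for the async case, get_user_pages_unlocked() for everything else), the slow path now composes a single gup flags word and makes one call. Below is a minimal sketch of that pattern extracted from the diff above; the helper name pin_one_page() is hypothetical and not part of the patch, while FOLL_HWPOISON, FOLL_WRITE, FOLL_NOWAIT and the four-argument get_user_pages_unlocked() are the kernel-internal APIs as of this commit.

/*
 * Hypothetical helper illustrating the unified pinning pattern this
 * commit introduces inside hva_to_pfn_slow(); not part of the patch.
 */
static int pin_one_page(unsigned long addr, bool write_fault, bool async,
			struct page **page)
{
	unsigned int flags = FOLL_HWPOISON;	/* surface poisoned pages as an error */

	if (write_fault)
		flags |= FOLL_WRITE;		/* the fault needs a writable mapping */
	if (async)
		flags |= FOLL_NOWAIT;		/* don't sleep on I/O; the caller
						 * will retry asynchronously */

	/*
	 * get_user_pages_unlocked() takes and drops mmap_sem internally,
	 * which is what makes the old down_read()/up_read() pair around
	 * get_user_page_nowait() unnecessary.
	 */
	return get_user_pages_unlocked(addr, 1, page, flags);
}

With FOLL_NOWAIT set, a fault that would have to wait for I/O makes the call return without having pinned the page, so the existing npages != 1 check in hva_to_pfn_slow() doubles as the "retry asynchronously" signal for the async case.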