Commit 9b4bdd2f authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: drop support of non-linear mapping from fault codepath

We don't create non-linear mappings anymore.  Let's drop code which
handles them on page fault.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 8a5f14a2
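For context on why the fault path no longer needs a nonlinear case: in a linear mapping the file offset of a faulting address follows directly from the VMA, so nothing has to be decoded out of the pte (which is what the removed do_nonlinear_fault() further below did via pte_to_pgoff()). A minimal user-space sketch of that arithmetic, roughly what the kernel derives for the linear case; the names vma_sketch and linear_pgoff are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

struct vma_sketch {
	uintptr_t vm_start;		/* first virtual address of the mapping */
	uint64_t  vm_pgoff;		/* file offset of vm_start, in pages */
};

/* File page a faulting address maps to, for a *linear* mapping. */
static uint64_t linear_pgoff(const struct vma_sketch *vma, uintptr_t address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

int main(void)
{
	struct vma_sketch vma = { .vm_start = 0x10000000, .vm_pgoff = 16 };

	/* Fault at the third page of the mapping -> file page 18. */
	printf("pgoff = %llu\n",
	       (unsigned long long)linear_pgoff(&vma, 0x10002000));
	return 0;
}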
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -206,21 +206,19 @@ extern unsigned int kobjsize(const void *objp);
 extern pgprot_t protection_map[16];
 
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
-#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
-#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED	0x40	/* second try */
-#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */
+#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED	0x20	/* Second try */
+#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
  * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
- * pgoff should be used in favour of virtual_address, if possible. If pgoff
- * is used, one may implement ->remap_pages to get nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
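With FAULT_FLAG_NONLINEAR gone, the remaining flags above are simply renumbered so each still occupies its own bit. A small standalone sketch (bit values copied from the new definitions; the rest is illustrative, not kernel code) of how such a mask is built and tested:

#include <assert.h>
#include <stdio.h>

/* Bit values as defined after this patch (see the hunk above). */
#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_MKWRITE	0x02
#define FAULT_FLAG_ALLOW_RETRY	0x04
#define FAULT_FLAG_RETRY_NOWAIT	0x08
#define FAULT_FLAG_KILLABLE	0x10
#define FAULT_FLAG_TRIED	0x20
#define FAULT_FLAG_USER		0x40

int main(void)
{
	/* A retryable write fault coming from user space. */
	unsigned int flags = FAULT_FLAG_WRITE | FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_USER;

	/* Each flag is a distinct bit, so tests stay simple mask checks. */
	assert(flags & FAULT_FLAG_WRITE);
	assert(!(flags & FAULT_FLAG_TRIED));
	printf("flags = 0x%02x\n", flags);	/* prints 0x45 */
	return 0;
}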
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1899,12 +1899,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
 /*
- * handle_pte_fault chooses page fault handler according to an entry
- * which was read non-atomically. Before making any commitment, on
- * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
- * must check under lock before unmapping the pte and proceeding
- * (but do_wp_page is only called after already making such a check;
+ * handle_pte_fault chooses page fault handler according to an entry which was
+ * read non-atomically. Before making any commitment, on those architectures
+ * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
+ * parts, do_swap_page must check under lock before unmapping the pte and
+ * proceeding (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page can safely check later on).
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
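The rewritten comment above is about ptes read without the page table lock: on configurations such as i386 with PAE a pte is wider than the native word, so an unlocked read can observe a mix of old and new halves, and do_swap_page() must re-check under the lock before acting on it. A small user-space illustration of that kind of torn read (the types and names are hypothetical, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* A 64-bit "pte" that a 32-bit CPU can only read as two separate halves. */
static uint64_t read_pte_nonatomic(const volatile uint32_t half[2])
{
	uint32_t lo = half[0];	/* another CPU may rewrite the pte right here... */
	uint32_t hi = half[1];	/* ...so lo and hi can come from different values */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* Halves taken from two different ptes: a value that never existed. */
	volatile uint32_t torn[2] = { 0xdeadb000u, 0x00000001u };

	printf("possibly torn pte: 0x%016llx\n",
	       (unsigned long long)read_pte_nonatomic(torn));
	return 0;
}

Re-reading and comparing the entry under the lock, which is what pte_unmap_same() does for do_swap_page(), is how the kernel guards against acting on such a value.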
@@ -2710,8 +2709,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
-		entry = pte_mksoft_dirty(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
@@ -2846,8 +2843,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
-	    fault_around_bytes >> PAGE_SHIFT > 1) {
+	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
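As a side note on the simplified guard above: fault-around only kicks in when the configured byte window covers more than one page, which is what the shift expresses. A quick standalone check of that arithmetic (PAGE_SHIFT assumed to be 12, i.e. 4 KiB pages; the example value is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long fault_around_bytes = 65536;	/* example: 64 KiB window */

	/* Same test as in do_read_fault() above: more than one page's worth. */
	if (fault_around_bytes >> PAGE_SHIFT > 1)
		printf("fault-around covers up to %lu pages per fault\n",
		       fault_around_bytes >> PAGE_SHIFT);
	else
		printf("fault-around effectively disabled\n");
	return 0;
}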
@@ -2992,7 +2988,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags, pte_t orig_pte)
 {
@@ -3009,46 +3005,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-/*
- * Fault of a previously existing named mapping. Repopulate the pte
- * from the encoded file_pte if possible. This enables swappable
- * nonlinear vmas.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with pte unmapped and unlocked.
- * The mmap_sem may have been released depending on flags and our
- * return value. See filemap_fault() and __lock_page_or_retry().
- */
-static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte)
-{
-	pgoff_t pgoff;
-
-	flags |= FAULT_FLAG_NONLINEAR;
-
-	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return 0;
-
-	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
-		/*
-		 * Page table corrupted: show pte and kill process.
-		 */
-		print_bad_pte(vma, address, orig_pte, NULL);
-		return VM_FAULT_SIGBUS;
-	}
-
-	pgoff = pte_to_pgoff(orig_pte);
-	if (!(flags & FAULT_FLAG_WRITE))
-		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
-				orig_pte);
-	if (!(vma->vm_flags & VM_SHARED))
-		return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
-				orig_pte);
-	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
-}
-
 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 			unsigned long addr, int page_nid,
 			int *flags)
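For contrast with the function removed above: a nonlinear (remap_file_pages-style) mapping worked by stashing the file page offset inside the not-present pte itself and decoding it at fault time, which is what the pte_to_pgoff() call did. A toy user-space illustration of that encode/decode idea (the bit layout and names are invented, not the real per-architecture pte format):

#include <stdint.h>
#include <stdio.h>

/*
 * Toy encoding: bit 1 marks a "file pte", the upper bits carry the pgoff.
 * The real pte_to_pgoff()/pgoff_to_pte() layouts were architecture-specific;
 * this only shows the idea the removed code relied on.
 */
#define TOY_PTE_FILE	0x2u

static uint64_t toy_pgoff_to_pte(uint64_t pgoff)
{
	return (pgoff << 2) | TOY_PTE_FILE;	/* not present, marked as file pte */
}

static uint64_t toy_pte_to_pgoff(uint64_t pte)
{
	return pte >> 2;
}

int main(void)
{
	uint64_t pte = toy_pgoff_to_pte(1234);

	if (pte & TOY_PTE_FILE)		/* roughly what pte_file() used to test */
		printf("nonlinear fault would read pgoff %llu back from the pte\n",
		       (unsigned long long)toy_pte_to_pgoff(pte));
	return 0;
}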
@@ -3176,15 +3132,12 @@ static int handle_pte_fault(struct mm_struct *mm,
 	if (pte_none(entry)) {
 		if (vma->vm_ops) {
 			if (likely(vma->vm_ops->fault))
-				return do_linear_fault(mm, vma, address,
-						pte, pmd, flags, entry);
+				return do_fault(mm, vma, address, pte,
+						pmd, flags, entry);
 		}
 		return do_anonymous_page(mm, vma, address,
 					 pte, pmd, flags);
 	}
-	if (pte_file(entry))
-		return do_nonlinear_fault(mm, vma, address,
-				pte, pmd, flags, entry);
 	return do_swap_page(mm, vma, address,
 			pte, pmd, flags, entry);
 }
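To summarise the hunk above: with the pte_file() branch gone, handle_pte_fault() reduces to a three-way choice. A heavily condensed, self-contained sketch of that shape (all *_sketch types and functions are made up; the signatures are not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* Made-up stand-ins for the kernel structures used in the dispatch. */
struct vm_ops_sketch { int (*fault)(void); };
struct vma_sketch    { const struct vm_ops_sketch *vm_ops; };

enum pte_kind { PTE_NONE, PTE_SWAP };	/* no file-pte kind after this patch */

static int do_fault_sketch(void)          { puts("file-backed fault"); return 0; }
static int do_anonymous_page_sketch(void) { puts("anonymous fault");   return 0; }
static int do_swap_page_sketch(void)      { puts("swap-in fault");     return 0; }

/* Condensed shape of handle_pte_fault() after this patch. */
static int handle_pte_fault_sketch(const struct vma_sketch *vma, enum pte_kind entry)
{
	if (entry == PTE_NONE) {
		if (vma->vm_ops && vma->vm_ops->fault)
			return do_fault_sketch();	/* was do_linear_fault() */
		return do_anonymous_page_sketch();
	}
	return do_swap_page_sketch();	/* the pte_file()/do_nonlinear_fault() branch is gone */
}

int main(void)
{
	const struct vm_ops_sketch ops = { .fault = do_fault_sketch };
	const struct vma_sketch file_vma = { .vm_ops = &ops };
	const struct vma_sketch anon_vma = { .vm_ops = NULL };

	handle_pte_fault_sketch(&file_vma, PTE_NONE);	/* file-backed fault */
	handle_pte_fault_sketch(&anon_vma, PTE_NONE);	/* anonymous fault */
	handle_pte_fault_sketch(&file_vma, PTE_SWAP);	/* swap-in fault */
	return 0;
}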