Commit aee16b3c authored by Jeremy Fitzhardinge, committed by Linus Torvalds

Add apply_to_page_range() which applies a function to a pte range

Add a new mm function, apply_to_page_range(), which applies a given function to
every pte in a given virtual address range in a given mm structure.  This is a
generic alternative to cut-and-pasting the idiomatic Linux pagetable-walking
code in every place where a sequence of PTEs must be accessed.

Although this interface is intended to be useful in a wide range of
situations, it is currently used specifically by several Xen subsystems, for
example: to ensure that pagetables have been allocated for a virtual address
range, and to construct batched special pagetable update requests to map I/O
memory (in ioremap()).
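To make the calling convention concrete, here is a minimal sketch of a caller; the names count_present_pte and count_present and the counting logic are hypothetical, not part of this patch. The callback is invoked once per pte, and a non-zero return value aborts the walk and is propagated back to the caller:

static int count_present_pte(pte_t *pte, struct page *pmd_page,
			     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* non-zero would stop the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start,
				   unsigned long npages)
{
	unsigned long count = 0;

	/* Walks [start, start + npages * PAGE_SIZE), allocating any
	 * missing pagetable levels on the way down and calling
	 * count_present_pte on every pte in the range. */
	apply_to_page_range(mm, start, npages << PAGE_SHIFT,
			    count_present_pte, &count);
	return count;
}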

[akpm@linux-foundation.org: fix warning, unpleasantly]
Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Matt Mackall <mpm@waste.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent eb3a1e11
include/linux/mm.h
@@ -1130,6 +1130,11 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */

typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
...
mm/memory.c
@@ -1448,6 +1448,100 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(remap_pfn_range);

static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pte_t *pte;
int err;
struct page *pmd_page;
spinlock_t *ptl = ptl; /* Suppress gcc warning */
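/* init_mm ptes need no pte lock; for a user mm the pte page is
 * mapped and the pagetable lock taken (dropped again below). */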
pte = (mm == &init_mm) ?
pte_alloc_kernel(pmd, addr) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
BUG_ON(pmd_huge(*pmd));
pmd_page = pmd_page(*pmd);
do {
err = fn(pte, pmd_page, addr, data);
if (err)
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
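/* pte ended one past the last entry visited, hence pte-1 below. */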
if (mm != &init_mm)
pte_unmap_unlock(pte-1, ptl);
return err;
}

static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pmd_t *pmd;
unsigned long next;
int err;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}

static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pud_t *pud;
unsigned long next;
int err;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}

/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
*/
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + size;
int err;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically. Before making any commitment, on
...
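Finally, a sketch of the "ensure pagetables are allocated" use mentioned in the commit message: because the walk itself calls pud_alloc()/pmd_alloc() and allocates pte pages on the way down, a callback that does nothing is sufficient to guarantee the pagetables exist for a range. The names noop_pte_fn and ensure_pagetables are hypothetical, not from this patch:

static int noop_pte_fn(pte_t *pte, struct page *pmd_page,
		       unsigned long addr, void *data)
{
	return 0;	/* the walk's allocation side effect is the point */
}

static int ensure_pagetables(struct mm_struct *mm, unsigned long start,
			     unsigned long size)
{
	return apply_to_page_range(mm, start, size, noop_pte_fn, NULL);
}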