提交 7b2259b3 编写于 作者: C Christoph Lameter 提交者: Linus Torvalds

[PATCH] page migration: Support a vma migration function

Hooks for calling vma specific migration functions

With this patch a vma may define a vma->vm_ops->migrate function.  That
function may perform page migration on its own (some vmas may not contain page
structs and therefore cannot be handled by regular page migration.  Pages in a
vma may require special preparatory treatment before migration is possible
etc.).  Only mmap_sem is held when the migration function is called.  The
migrate() function gets passed two sets of nodemasks describing the source and
the target of the migration.  The flags parameter either contains

MPOL_MF_MOVE	which means that only pages used exclusively by
		the specified mm should be moved

or

MPOL_MF_MOVE_ALL which means that pages shared with other processes
		should also be moved.

The migration function returns 0 on success or an error condition.  An error
condition will prevent regular page migration from occurring.

On its own this patch cannot be included since there are no users for this
functionality.  But it seems that the uncached allocator will need this
functionality at some point.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 68402ddc
...@@ -16,7 +16,9 @@ extern int fail_migrate_page(struct address_space *, ...@@ -16,7 +16,9 @@ extern int fail_migrate_page(struct address_space *,
struct page *, struct page *); struct page *, struct page *);
extern int migrate_prep(void); extern int migrate_prep(void);
extern int migrate_vmas(struct mm_struct *mm,
const nodemask_t *from, const nodemask_t *to,
unsigned long flags);
#else #else
static inline int isolate_lru_page(struct page *p, struct list_head *list) static inline int isolate_lru_page(struct page *p, struct list_head *list)
...@@ -30,6 +32,13 @@ static inline int migrate_pages_to(struct list_head *pagelist, ...@@ -30,6 +32,13 @@ static inline int migrate_pages_to(struct list_head *pagelist,
static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; }
/*
 * Stub used when CONFIG_MIGRATION is disabled: vma-level migration is
 * unavailable, so report "function not implemented" to the caller.
 */
static inline int migrate_vmas(struct mm_struct *mm,
			       const nodemask_t *from,
			       const nodemask_t *to,
			       unsigned long flags)
{
	return -ENOSYS;
}
/* Possible settings for the migrate_page() method in address_operations */ /* Possible settings for the migrate_page() method in address_operations */
#define migrate_page NULL #define migrate_page NULL
#define fail_migrate_page NULL #define fail_migrate_page NULL
......
...@@ -206,6 +206,8 @@ struct vm_operations_struct { ...@@ -206,6 +206,8 @@ struct vm_operations_struct {
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma, struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr); unsigned long addr);
int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
const nodemask_t *to, unsigned long flags);
#endif #endif
}; };
......
...@@ -632,6 +632,10 @@ int do_migrate_pages(struct mm_struct *mm, ...@@ -632,6 +632,10 @@ int do_migrate_pages(struct mm_struct *mm,
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
err = migrate_vmas(mm, from_nodes, to_nodes, flags);
if (err)
goto out;
/* /*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest' * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
* bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
...@@ -691,7 +695,7 @@ int do_migrate_pages(struct mm_struct *mm, ...@@ -691,7 +695,7 @@ int do_migrate_pages(struct mm_struct *mm,
if (err < 0) if (err < 0)
break; break;
} }
out:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (err < 0) if (err < 0)
return err; return err;
......
...@@ -976,3 +976,23 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, ...@@ -976,3 +976,23 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
} }
#endif #endif
/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration.  Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 *
 * Caller must hold mm->mmap_sem (see do_migrate_pages).
 *
 * Returns 0 on success, or the first non-zero error returned by a
 * vma->vm_ops->migrate() callback; iteration stops at the first error.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *from,
	const nodemask_t *to, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	/*
	 * Walk every vma on the mm's list.  Test vma itself, not
	 * vma->vm_next: the old condition skipped the final vma and
	 * dereferenced NULL when mm->mmap was empty.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			/*
			 * Parameter names now match the extern declaration
			 * and the vm_ops->migrate prototype: source nodes
			 * first, destination nodes second.  Positional
			 * behavior is unchanged for existing callers.
			 */
			err = vma->vm_ops->migrate(vma, from, to, flags);
			if (err)
				break;
		}
	}
	return err;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册