Commit 6daa0e28 authored by Al Viro, committed by Linus Torvalds

[PATCH] gfp_t: mm/* (easy parts)

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent af4ca457
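
Background for the diff below: gfp_t is the annotated typedef for allocation flags introduced by the parent commit af4ca457 ("gfp_t: infrastructure"); this patch converts the easy mm/ call sites from plain integer masks to it. A minimal sketch of the idea follows (hedged: the exact sparse annotation evolved over later commits, and the flag values are merely representative of kernels of this era):

/* Sketch only: gfp_t as a sparse-annotated integer type.  When the
 * tree is checked with sparse (make C=1), mixing GFP flags with
 * ordinary integers produces warnings; in a normal build the
 * annotations compile away and gfp_t is just an unsigned int.
 */
typedef unsigned int __bitwise gfp_t;

#define __GFP_WAIT	((__force gfp_t)0x10u)	/* allocation may sleep */
#define __GFP_IO	((__force gfp_t)0x40u)	/* may start physical I/O */

With prototypes such as add_to_page_cache() below taking gfp_t, a caller that still passes a bare int is something the checker can flag instead of the compiler accepting it silently.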
@@ -747,7 +747,7 @@ extern unsigned long do_mremap(unsigned long addr,
  * The callback will be passed nr_to_scan == 0 when the VM is querying the
  * cache size, so a fastpath for that case is appropriate.
  */
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 /*
  * Add an aging callback.  The int is the number of 'seeks' it takes
...
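
As an illustration of the typedef just changed (hypothetical cache and helper names, not from this patch): a shrinker now takes its mask as gfp_t, and per the comment in the hunk above it should answer the nr_to_scan == 0 size query on a fast path.

/* Hypothetical shrinker matching the new shrinker_t signature. */
static int my_cache_count;			/* objects currently cached */

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return my_cache_count;		/* size query: fast path, no work */
	if (!(gfp_mask & __GFP_FS))
		return -1;			/* caller cannot tolerate FS recursion */
	/* ... free up to nr_to_scan objects here ... */
	return my_cache_count;
}

Such a callback would be registered with set_shrinker(DEFAULT_SEEKS, my_cache_shrink), matching the aging-callback registration the surrounding header describes.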
@@ -69,7 +69,7 @@ extern struct page * find_lock_page(struct address_space *mapping,
 extern struct page * find_trylock_page(struct address_space *mapping,
 				unsigned long index);
 extern struct page * find_or_create_page(struct address_space *mapping,
-				unsigned long index, unsigned int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
@@ -92,9 +92,9 @@ extern int read_cache_pages(struct address_space *mapping,
 		struct list_head *pages, filler_t *filler, void *data);
 
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
...
@@ -121,7 +121,7 @@ extern unsigned int ksize(const void *);
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
 extern void *kmalloc_node(size_t size, gfp_t flags, int node);
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
...
@@ -171,8 +171,8 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, unsigned int);
-extern int zone_reclaim(struct zone *, unsigned int, unsigned int);
+extern int try_to_free_pages(struct zone **, gfp_t);
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
...
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 	if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-		unsigned long index, unsigned int gfp_mask)
+		unsigned long index, gfp_t gfp_mask)
 {
 	struct page *page, *cached_page = NULL;
 	int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
...
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	void *element;
 	unsigned long flags;
 	wait_queue_t wait;
-	unsigned int gfp_temp;
+	gfp_t gfp_temp;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
...
@@ -85,7 +85,7 @@ enum sgp_type {
 static int shmem_getpage(struct inode *inode, unsigned long idx,
 			 struct page **pagep, enum sgp_type sgp, int *type);
 
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
 	/*
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
 }
 
 static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 		 unsigned long idx)
 {
 	struct vm_area_struct pvma;
...
@@ -386,7 +386,7 @@ struct kmem_cache_s {
 	unsigned int		gfporder;
 
 	/* force GFP flags, e.g. GFP_DMA */
-	unsigned int		gfpflags;
+	gfp_t			gfpflags;
 
 	size_t			colour;		/* cache colouring range */
 	unsigned int		colour_off;	/* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	struct slab	*slabp;
 	void		*objp;
 	size_t		 offset;
-	unsigned int	 local_flags;
+	gfp_t		 local_flags;
 	unsigned long	 ctor_flags;
 	struct kmem_list3 *l3;
 
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
...
@@ -70,7 +70,7 @@ struct scan_control {
 	unsigned int priority;
 
 	/* This context's GFP mask */
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
 	struct scan_control sc;
 	int nr_pages = 1 << order;
...
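
A closing usage note (hypothetical call site, not part of the patch): after this conversion, a leftover plain-integer mask is exactly what sparse can now report, along the lines of:

	/* Hypothetical caller, for illustration only. */
	unsigned int mask = GFP_KERNEL;			/* loses the gfp_t annotation */
	add_to_page_cache(page, mapping, index, mask);	/* sparse: incorrect type in argument 4 */

The fix in each such case is mechanical, as in the hunks above: declare the mask as gfp_t.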