Commit a19b27ce authored by Matthew Dobson, committed by Linus Torvalds

[PATCH] mempool: use common mempool page allocator

Convert two mempool users that currently use their own mempool-backed page
allocators to use the generic mempool page allocator.

Also included are 2 trivial whitespace fixes.
Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 6e0678f3
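For context, the generic mempool page allocator referenced in the commit message consists of three helpers declared in <linux/mempool.h>: mempool_alloc_pages(), mempool_free_pages(), and the mempool_create_page_pool() wrapper. The following is a minimal usage sketch; the pool name, reserve size, and error handling are illustrative and not taken from the patch:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>

#define EXAMPLE_MIN_PAGES 16	/* illustrative reserve size */

static mempool_t *example_page_pool;

static int example_pool_init(void)
{
	/* Keep at least EXAMPLE_MIN_PAGES order-0 pages in reserve. */
	example_page_pool = mempool_create_page_pool(EXAMPLE_MIN_PAGES, 0);
	if (!example_page_pool)
		return -ENOMEM;
	return 0;
}

static void example_pool_use(void)
{
	/* Falls back to the reserved pages when the page allocator cannot satisfy the request. */
	struct page *page = mempool_alloc(example_page_pool, GFP_NOIO);

	/* ... use the page ... */

	mempool_free(page, example_page_pool);
}

static void example_pool_exit(void)
{
	mempool_destroy(example_page_pool);
}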
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -93,20 +93,6 @@ struct crypt_config {
 
 static kmem_cache_t *_crypt_io_pool;
 
-/*
- * Mempool alloc and free functions for the page
- */
-static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
-static void mempool_free_page(void *page, void *data)
-{
-	__free_page(page);
-}
-
 /*
  * Different IV generation algorithms:
  *
@@ -637,8 +623,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad3;
 	}
 
-	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
-				       mempool_free_page, NULL);
+	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = PFX "Cannot allocate page mempool";
 		goto bad4;
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -31,14 +31,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask | GFP_DMA);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data)
  */
 #ifdef CONFIG_HIGHMEM
 
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
-{
-	return alloc_page(gfp_mask);
-}
-
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
 	if (!page_pool)
 		BUG();
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
 	if (!isa_page_pool)
 		BUG();
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
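One note on the mm/highmem.c change: the ISA bounce pool cannot use mempool_create_page_pool() directly, because its pages must come from the DMA zone. The patch therefore keeps mempool_create() together with a one-line wrapper that ORs in GFP_DMA before delegating to mempool_alloc_pages(), which interprets its pool_data argument as the page order. A self-contained sketch of that pattern, with illustrative names and pool size:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>

#define EXAMPLE_ISA_POOL_SIZE 16	/* illustrative reserve size */

static mempool_t *example_isa_pool;

/* Force GFP_DMA so every pooled page is reachable by ISA devices. */
static void *example_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static int example_isa_pool_init(void)
{
	/* The final argument (pool_data) is the page order, here 0. */
	example_isa_pool = mempool_create(EXAMPLE_ISA_POOL_SIZE,
					  example_alloc_pages_isa,
					  mempool_free_pages, (void *) 0);
	return example_isa_pool ? 0 : -ENOMEM;
}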