Commit cf725ce2 authored by Roman Pen, committed by Linus Torvalds

mm/vmalloc: occupy newly allocated vmap block just after allocation

Previous implementation allocates a new vmap block and repeats the search for
a free block from the very beginning, iterating over the CPU free list.

Why is occupying the new block right away better?

1. Allocation can happen on one CPU, but the search can be done on another CPU.
   In the worst case we preallocate as many vmap blocks as there are CPUs in
   the system.

2. In the previous patch the newly allocated block is added to the tail of the
   free list, to avoid exhausting virtual space too soon and to give blocks
   which were allocated long ago a chance to be occupied.  Thus, to find the
   newly allocated block, the whole search sequence has to be repeated, which
   is not efficient.

In this patch the newly allocated block is occupied right away, and the address
of virtual space is returned to the caller, so there is no need to repeat the
search sequence: the allocation job is done.
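
Purely as an illustrative aside (this is not the kernel code: struct block,
block_alloc(), new_block(), BLOCK_PAGES and the fake base address are made-up
stand-ins, and all locking, RCU and per-CPU handling are stripped out), a
minimal userspace C sketch of the control-flow change described above, where
the newly created block already has its first 2^order pages carved out and
hands that address straight back to the caller:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT  12
#define BLOCK_PAGES 1024UL              /* stand-in for VMAP_BBMAP_BITS */

/* Made-up, simplified stand-in for struct vmap_block (no locking, no bitmap). */
struct block {
        unsigned long va_start;         /* base virtual address of the block */
        unsigned long free;             /* pages still unallocated */
        struct block *next;             /* free-list link */
};

static struct block *free_list;         /* stand-in for the per-CPU free list */

/* Same idea as vmap_block_vaddr(): address of page offset 'pages_off'. */
static void *block_vaddr(unsigned long va_start, unsigned long pages_off)
{
        return (void *)(va_start + (pages_off << PAGE_SHIFT));
}

/*
 * Same idea as the patched new_vmap_block(): the block is created with
 * 2^order pages already taken, and the address of those pages is returned,
 * so the caller never has to search the free list a second time.
 */
static void *new_block(unsigned int order, unsigned long va_start)
{
        struct block *b;

        if ((1UL << order) >= BLOCK_PAGES)
                return NULL;            /* at least something must stay free */
        b = malloc(sizeof(*b));
        if (!b)
                return NULL;
        b->va_start = va_start;
        b->free = BLOCK_PAGES - (1UL << order);   /* occupy right away */
        b->next = free_list;
        free_list = b;
        return block_vaddr(va_start, 0);          /* offset 0 = the carved pages */
}

/* Same shape as the patched vb_alloc() tail: no 'goto again', no re-search. */
static void *block_alloc(unsigned int order)
{
        for (struct block *b = free_list; b; b = b->next) {
                if (b->free < (1UL << order))
                        continue;
                unsigned long pages_off = BLOCK_PAGES - b->free;
                b->free -= 1UL << order;
                return block_vaddr(b->va_start, pages_off);
        }
        /* Nothing found: allocate a new block that is already occupied. */
        return new_block(order, 0x100000UL /* fake base address */);
}

int main(void)
{
        printf("first  alloc: %p\n", block_alloc(0));  /* served by new_block() */
        printf("second alloc: %p\n", block_alloc(0));  /* reuses the same block */
        return 0;
}

The old path instead pushed the fresh block onto the free list and jumped back
to repeat the whole search; here the retry loop simply disappears because
new_block() returns a usable address directly.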
Signed-off-by: Roman Pen <r.peniaev@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: WANG Chao <chaowang@redhat.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Christoph Lameter <cl@linux.com>
Cc: Gioh Kim <gioh.kim@lge.com>
Cc: Rob Jones <rob.jones@codethink.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 68ac546f
@@ -796,13 +796,31 @@ static unsigned long addr_to_vb_idx(unsigned long addr)
 	return addr;
 }
 
-static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
+{
+	unsigned long addr;
+
+	addr = va_start + (pages_off << PAGE_SHIFT);
+	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+	return (void *)addr;
+}
+
+/**
+ * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
+ *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
+ * @order:    how many 2^order pages should be occupied in newly allocated block
+ * @gfp_mask: flags for the page level allocator
+ *
+ * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ */
+static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
 	int node, err;
+	void *vaddr;
 
 	node = numa_node_id();
 
@@ -826,9 +844,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 		return ERR_PTR(err);
 	}
 
+	vaddr = vmap_block_vaddr(va->va_start, 0);
 	spin_lock_init(&vb->lock);
 	vb->va = va;
-	vb->free = VMAP_BBMAP_BITS;
+	/* At least something should be left free */
+	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+	vb->free = VMAP_BBMAP_BITS - (1UL << order);
 	vb->dirty = 0;
 	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 	INIT_LIST_HEAD(&vb->free_list);
@@ -846,7 +867,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
-	return vb;
+	return vaddr;
 }
 
 static void free_vmap_block(struct vmap_block *vb)
@@ -910,7 +931,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
-	unsigned long addr = 0;
+	void *vaddr = NULL;
 	unsigned int order;
 
 	BUG_ON(size & ~PAGE_MASK);
@@ -925,43 +946,38 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	}
 	order = get_order(size);
 
-again:
 	rcu_read_lock();
 	vbq = &get_cpu_var(vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		int i;
+		unsigned long pages_off;
 
 		spin_lock(&vb->lock);
-		if (vb->free < 1UL << order)
-			goto next;
+		if (vb->free < (1UL << order)) {
+			spin_unlock(&vb->lock);
+			continue;
+		}
 
-		i = VMAP_BBMAP_BITS - vb->free;
-		addr = vb->va->va_start + (i << PAGE_SHIFT);
-		BUG_ON(addr_to_vb_idx(addr) !=
-				addr_to_vb_idx(vb->va->va_start));
+		pages_off = VMAP_BBMAP_BITS - vb->free;
+		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
 		vb->free -= 1UL << order;
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
 			list_del_rcu(&vb->free_list);
 			spin_unlock(&vbq->lock);
 		}
+
 		spin_unlock(&vb->lock);
 		break;
-next:
-		spin_unlock(&vb->lock);
 	}
 
 	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
-	if (!addr) {
-		vb = new_vmap_block(gfp_mask);
-		if (IS_ERR(vb))
-			return vb;
-		goto again;
-	}
+	/* Allocate new block if nothing was found */
+	if (!vaddr)
+		vaddr = new_vmap_block(order, gfp_mask);
 
-	return (void *)addr;
+	return vaddr;
 }
 
 static void vb_free(const void *addr, unsigned long size)