Commit 80c4bd7a authored by Chris Wilson, committed by Linus Torvalds

mm/vmalloc: keep a separate lazy-free list

When mixing lots of vmallocs and set_memory_*() (which calls
vm_unmap_aliases()) I encountered situations where performance degraded
severely because every invocation walked the entire vmap_area list.

One simple improvement is to add the lazily freed vmap_area to a
separate lockless free list, so that we avoid having to walk the full
list on each purge.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Roman Pen <r.peniaev@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Roman Pen <r.peniaev@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f705ac4b
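The "separate lockless free list" referred to above is the kernel's lock-less singly linked list, <linux/llist.h>, as seen in the diff below. For intuition, here is a minimal user-space sketch of the same pattern using C11 atomics: producers push entries with a compare-and-swap, and the purger detaches the entire pending list with a single atomic exchange, so neither side needs a lock just to queue or collect work. The names (lazy_add, purge_all, struct node) are illustrative only, not the kernel implementation.

/*
 * User-space sketch of the lock-less list idea behind <linux/llist.h>.
 * Hypothetical names; not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) purge_list;

/* Counterpart of llist_add(): lock-free push onto the front of the list. */
static void lazy_add(struct node *n)
{
	struct node *first = atomic_load(&purge_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&purge_list, &first, n));
}

/* Counterpart of llist_del_all(): detach every pending node at once. */
static struct node *purge_all(void)
{
	return atomic_exchange(&purge_list, NULL);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	lazy_add(&a);
	lazy_add(&b);

	/* Nodes come back most-recently-added first (LIFO order). */
	for (struct node *n = purge_all(); n; n = n->next)
		printf("purging node %d\n", n->id);

	return 0;
}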
@@ -4,6 +4,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <asm/page.h>		/* pgprot_t */
 #include <linux/rbtree.h>
@@ -44,7 +45,7 @@ struct vmap_area {
 	unsigned long flags;
 	struct rb_node rb_node;		/* address sorted rbtree */
 	struct list_head list;		/* address sorted list */
-	struct list_head purge_list;	/* "lazy purge" list */
+	struct llist_node purge_list;	/* "lazy purge" list */
 	struct vm_struct *vm;
 	struct rcu_head rcu_head;
 };
...
@@ -274,13 +274,12 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 
 /*** Global kva allocator ***/
 
-#define VM_LAZY_FREE	0x01
-#define VM_LAZY_FREEING	0x02
 #define VM_VM_AREA	0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
 LIST_HEAD(vmap_area_list);
+static LLIST_HEAD(vmap_purge_list);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -601,7 +600,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 					int sync, int force_flush)
 {
 	static DEFINE_SPINLOCK(purge_lock);
-	LIST_HEAD(valist);
+	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
 	int nr = 0;
@@ -620,20 +619,14 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	if (sync)
 		purge_fragmented_blocks_allcpus();
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(va, &vmap_area_list, list) {
-		if (va->flags & VM_LAZY_FREE) {
-			if (va->va_start < *start)
-				*start = va->va_start;
-			if (va->va_end > *end)
-				*end = va->va_end;
-			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-			list_add_tail(&va->purge_list, &valist);
-			va->flags |= VM_LAZY_FREEING;
-			va->flags &= ~VM_LAZY_FREE;
-		}
-	}
-	rcu_read_unlock();
+	valist = llist_del_all(&vmap_purge_list);
+	llist_for_each_entry(va, valist, purge_list) {
+		if (va->va_start < *start)
+			*start = va->va_start;
+		if (va->va_end > *end)
+			*end = va->va_end;
+		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+	}
 
 	if (nr)
 		atomic_sub(nr, &vmap_lazy_nr);
@@ -643,7 +636,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry_safe(va, n_va, &valist, purge_list)
+		llist_for_each_entry_safe(va, n_va, valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
@@ -678,9 +671,15 @@ static void purge_vmap_area_lazy(void)
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
-	va->flags |= VM_LAZY_FREE;
-	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
-	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
+	int nr_lazy;
+
+	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
+				    &vmap_lazy_nr);
+
+	/* After this point, we may free va at any time */
+	llist_add(&va->purge_list, &vmap_purge_list);
+
+	if (unlikely(nr_lazy > lazy_max_pages()))
 		try_purge_vmap_area_lazy();
 }
...
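Taken together, the hunks above replace the per-area VM_LAZY_FREE/VM_LAZY_FREEING flag juggling and the RCU walk of the whole vmap_area_list with three llist primitives. The condensed sketch below restates that flow in one place; it is distilled from the diff and is not additional kernel code (the types and helpers such as __free_vmap_area() live in mm/vmalloc.c).

/* Producer side -- free_vmap_area_noflush(): account the pages, then
 * publish the area on the lock-less purge list; no lock is taken, and
 * va may be reclaimed by a purger as soon as llist_add() returns. */
nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
			    &vmap_lazy_nr);
llist_add(&va->purge_list, &vmap_purge_list);
if (unlikely(nr_lazy > lazy_max_pages()))
	try_purge_vmap_area_lazy();

/* Consumer side -- __purge_vmap_area_lazy(): take ownership of every
 * pending area with one atomic exchange, then free them under
 * vmap_area_lock.  The _safe iterator is required because each entry
 * is freed while the detached list is being walked. */
valist = llist_del_all(&vmap_purge_list);
spin_lock(&vmap_area_lock);
llist_for_each_entry_safe(va, n_va, valist, purge_list)
	__free_vmap_area(va);
spin_unlock(&vmap_area_lock);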