Commit 504f52b5 authored by Eric Dumazet, committed by Linus Torvalds

mm: NUMA aware alloc_task_struct_node()

Since all kthreads are created from a single helper task, they all use memory
from a single node for their kernel stack and task_struct.

This patch series adds kthread_create_on_cpu(), which takes a 'cpu' parameter
in addition to the parameters already taken by kthread_create().

This parameter is used to allocate memory for the new kthread on its memory
node, if available.

Users of this new function are: ksoftirqd, kworker, migration, pktgend...
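
Conceptually (a sketch only; the actual plumbing from the 'cpu' argument down
to dup_task_struct() is done by later patches of the series), the cpu is mapped
to its memory node with the existing cpu_to_node() helper and the node-aware
allocator introduced here is used:

	/* sketch: resolve the CPU's memory node, then allocate the
	 * task_struct on that node (alloc_task_struct_node() is the
	 * allocator added by this patch) */
	int node = cpu_to_node(cpu);
	struct task_struct *tsk;

	tsk = alloc_task_struct_node(node);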

This patch:

Add a node parameter to alloc_task_struct(), and rename it to
alloc_task_struct_node().

This change is needed to allow a NUMA-aware kthread_create_on_cpu().
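
Call sites with no particular placement preference simply pass the node of the
CPU doing the allocation, as the kernel/fork.c hunk below does:

	/* before */
	tsk = alloc_task_struct();

	/* after: local-node placement made explicit, as dup_task_struct() does below */
	tsk = alloc_task_struct_node(numa_node_id());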
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: David Howells <dhowells@redhat.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 9d502c1c
@@ -137,7 +137,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk)->thread.frame0->sp)
 
 /* Allocation and freeing of basic task resources. */
-extern struct task_struct *alloc_task_struct(void);
+extern struct task_struct *alloc_task_struct_node(int node);
 extern void free_task_struct(struct task_struct *p);
 
 #define cpu_relax()	barrier()
......
@@ -44,9 +44,10 @@ asmlinkage void ret_from_fork(void);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
-struct task_struct *alloc_task_struct(void)
+struct task_struct *alloc_task_struct_node(int node)
 {
-	struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
+	struct task_struct *p = kmalloc_node(THREAD_SIZE, GFP_KERNEL, node);
+
 	if (p)
 		atomic_set((atomic_t *)(p+1), 1);
 	return p;
......
@@ -84,7 +84,14 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct_node(node)					\
+({									\
+	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
+					     KERNEL_STACK_SIZE_ORDER);	\
+	struct task_struct *ret = page ? page_address(page) : NULL;	\
+									\
+	ret;								\
+})
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif	/* !__ASSEMBLY */
......
@@ -66,7 +66,7 @@ struct thread_struct {
 	.request		= { 0 } \
 }
 
-extern struct task_struct *alloc_task_struct(void);
+extern struct task_struct *alloc_task_struct_node(int node);
 
 static inline void release_thread(struct task_struct *task)
 {
......
@@ -109,8 +109,10 @@ int nr_processes(void)
 }
 
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
-# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
+# define alloc_task_struct_node(node)		\
+		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
+# define free_task_struct(tsk)			\
+		kmem_cache_free(task_struct_cachep, (tsk))
 static struct kmem_cache *task_struct_cachep;
 #endif
 
@@ -249,12 +251,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	struct task_struct *tsk;
 	struct thread_info *ti;
 	unsigned long *stackend;
-
+	int node = numa_node_id();
 	int err;
 
 	prepare_to_copy(orig);
 
-	tsk = alloc_task_struct();
+	tsk = alloc_task_struct_node(node);
 	if (!tsk)
 		return NULL;
 
......