Commit 6b6309b4 authored by Mike Travis, committed by Ingo Molnar

x86: reduce memory and stack usage in intel_cacheinfo

* Change the following static arrays sized by NR_CPUS to
  per_cpu data variables:

	_cpuid4_info *cpuid4_info[NR_CPUS];
	_index_kobject *index_kobject[NR_CPUS];
	kobject * cache_kobject[NR_CPUS];

* Replace the local NR_CPUS array in show_shared_cpu_map() with a
  kmalloc'd region.

Also fix some minor complaints from checkpatch.pl.

Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 30ca60c1
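Editor's note: the heart of the change is the standard conversion from an NR_CPUS-sized array to a per-CPU variable. The following is a minimal sketch of that pattern, not the patch itself; the stub struct body and the helper alloc_cache_leaves() with its nleaves parameter are made up here for illustration.

	#include <linux/percpu.h>
	#include <linux/slab.h>

	/* Stand-in for the real cache-leaf structure (fields elided). */
	struct _cpuid4_info {
		unsigned long size;
	};

	/* Before: one pointer slot per possible CPU, statically sized by
	 * the compile-time NR_CPUS:
	 *
	 *	static struct _cpuid4_info *cpuid4_info[NR_CPUS];
	 *
	 * memory for all NR_CPUS entries is consumed even when far fewer
	 * CPUs are actually possible on the running machine. */

	/* After: a per-CPU variable; storage lives in each CPU's per-CPU
	 * area and is reached as per_cpu(cpuid4_info, cpu) rather than
	 * cpuid4_info[cpu]. */
	static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);

	/* Hypothetical helper showing the allocation pattern used in the
	 * diff below: the per-leaf array itself is still kzalloc'd. */
	static int alloc_cache_leaves(unsigned int cpu, unsigned int nleaves)
	{
		per_cpu(cpuid4_info, cpu) =
			kzalloc(sizeof(struct _cpuid4_info) * nleaves, GFP_KERNEL);
		if (per_cpu(cpuid4_info, cpu) == NULL)
			return -ENOMEM;
		return 0;
	}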
@@ -129,7 +129,7 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
-	cpumask_t shared_cpu_map;
+	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
 unsigned short			num_cache_leaves;
@@ -451,8 +451,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 }
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static struct _cpuid4_info *cpuid4_info[NR_CPUS];
-#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -474,7 +474,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		if (cpu_data(i).apicid >> index_msb ==
 		    c->apicid >> index_msb) {
 			cpu_set(i, this_leaf->shared_cpu_map);
-			if (i != cpu && cpuid4_info[i]) {
+			if (i != cpu && per_cpu(cpuid4_info, i)) {
 				sibling_leaf = CPUID4_INFO_IDX(i, index);
 				cpu_set(cpu, sibling_leaf->shared_cpu_map);
 			}
@@ -505,8 +505,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(cpuid4_info[cpu]);
-	cpuid4_info[cpu] = NULL;
+	kfree(per_cpu(cpuid4_info, cpu));
+	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
@@ -519,9 +519,9 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	cpuid4_info[cpu] = kzalloc(
+	per_cpu(cpuid4_info, cpu) = kzalloc(
 		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
@@ -546,8 +546,8 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 
 out:
 	if (retval) {
-		kfree(cpuid4_info[cpu]);
-		cpuid4_info[cpu] = NULL;
+		kfree(per_cpu(cpuid4_info, cpu));
+		per_cpu(cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -561,7 +561,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static struct kobject * cache_kobject[NR_CPUS];
+static DEFINE_PER_CPU(struct kobject *, cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -570,8 +570,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static struct _index_kobject *index_kobject[NR_CPUS];
-#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -593,9 +593,16 @@ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 
 static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
 {
-	char mask_str[NR_CPUS];
-	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
-	return sprintf(buf, "%s\n", mask_str);
+	int n = 0;
+	int len = cpumask_scnprintf_len(nr_cpu_ids);
+	char *mask_str = kmalloc(len, GFP_KERNEL);
+
+	if (mask_str) {
+		cpumask_scnprintf(mask_str, len, this_leaf->shared_cpu_map);
+		n = sprintf(buf, "%s\n", mask_str);
+		kfree(mask_str);
+	}
+	return n;
 }
 
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
@@ -684,10 +691,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-	kfree(cache_kobject[cpu]);
-	kfree(index_kobject[cpu]);
-	cache_kobject[cpu] = NULL;
-	index_kobject[cpu] = NULL;
+	kfree(per_cpu(cache_kobject, cpu));
+	kfree(per_cpu(index_kobject, cpu));
+	per_cpu(cache_kobject, cpu) = NULL;
+	per_cpu(index_kobject, cpu) = NULL;
 	free_cache_attributes(cpu);
 }
@@ -703,13 +710,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		return err;
 
 	/* Allocate all required memory */
-	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
-	if (unlikely(cache_kobject[cpu] == NULL))
+	per_cpu(cache_kobject, cpu) =
+		kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
 		goto err_out;
 
-	index_kobject[cpu] = kzalloc(
+	per_cpu(index_kobject, cpu) = kzalloc(
 		sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(index_kobject[cpu] == NULL))
+	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
 	return 0;
@@ -733,7 +741,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (unlikely(retval < 0))
 		return retval;
 
-	retval = kobject_init_and_add(cache_kobject[cpu], &ktype_percpu_entry,
+	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+				      &ktype_percpu_entry,
 				      &sys_dev->kobj, "%s", "cache");
 	if (retval < 0) {
 		cpuid4_cache_sysfs_exit(cpu);
@@ -745,13 +754,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
-					      &ktype_cache, cache_kobject[cpu],
+					      &ktype_cache,
+					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
 			for (j = 0; j < i; j++) {
 				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
 			}
-			kobject_put(cache_kobject[cpu]);
+			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			break;
 		}
@@ -760,7 +770,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (!retval)
 		cpu_set(cpu, cache_dev_map);
 
-	kobject_uevent(cache_kobject[cpu], KOBJ_ADD);
+	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return retval;
 }
@@ -769,7 +779,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	if (cpuid4_info[cpu] == NULL)
+	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
 	if (!cpu_isset(cpu, cache_dev_map))
 		return;
@@ -777,7 +787,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-	kobject_put(cache_kobject[cpu]);
+	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
...
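Editor's note: the show_shared_cpu_map() hunk above is the stack-usage half of the patch. Restated outside the diff with editorial comments (the function body is taken from the patch; the comments and the NR_CPUS=4096 figure are editorial assumptions about a large-SMP configuration of that era):

	static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
	{
		int n = 0;
		/* Size the buffer for the worst-case printed mask of the CPUs
		 * that can actually exist (nr_cpu_ids), not the compile-time
		 * NR_CPUS ceiling; with, e.g., NR_CPUS=4096 the old on-stack
		 * char mask_str[NR_CPUS] ate 4KB of the kernel stack. */
		int len = cpumask_scnprintf_len(nr_cpu_ids);
		char *mask_str = kmalloc(len, GFP_KERNEL);	/* heap, not stack */

		if (mask_str) {
			cpumask_scnprintf(mask_str, len, this_leaf->shared_cpu_map);
			n = sprintf(buf, "%s\n", mask_str);
			kfree(mask_str);
		}
		/* On allocation failure, report zero bytes rather than oops. */
		return n;
	}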