Commit 6eeb7c5a authored by Mike Travis

x86: update add-cpu_mask_to_apicid_and to use struct cpumask*

Impact: use updated APIs

Various API updates for x86:add-cpu_mask_to_apicid_and

(Note: separate because previous patch has been "backported" to 2.6.27.)
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Parent 95d313cf
@@ -129,8 +129,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
         return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         int cpu;
@@ -138,9 +138,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return cpu_to_logical_apicid(cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return cpu_to_logical_apicid(cpu);
         return BAD_APICID;
 }
...
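As an aside, this first hunk shows the conversion pattern most of the patch repeats: the open-coded scan over a dereferenced cpumask_t becomes a single cpumask_any_and() call, which performs the AND and the search together and returns a value of at least nr_cpu_ids when the intersection is empty. A minimal sketch of the new shape, assuming <linux/cpumask.h>; the helper name below is illustrative and not part of the patch:

        #include <linux/cpumask.h>

        /* Illustrative only: pick one CPU present in both masks, the way the
         * converted cpu_mask_to_apicid_and() helpers now do. */
        static unsigned int pick_common_cpu(const struct cpumask *cpumask,
                                            const struct cpumask *andmask)
        {
                /* cpumask_any_and() does the AND and the scan in one step and
                 * returns a value >= nr_cpu_ids when no CPU is set in both. */
                unsigned int cpu = cpumask_any_and(cpumask, andmask);

                return cpu < nr_cpu_ids ? cpu : nr_cpu_ids;
        }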
@@ -214,8 +214,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
         return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         int num_bits_set;
         int num_bits_set2;
@@ -223,9 +223,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
         int cpu;
         int apicid = 0;
 
-        num_bits_set = cpus_weight(*cpumask);
-        num_bits_set2 = cpus_weight(*andmask);
-        num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+        num_bits_set = cpumask_weight(cpumask);
+        num_bits_set2 = cpumask_weight(andmask);
+        num_bits_set = min(num_bits_set, num_bits_set2);
         /* Return id to all */
         if (num_bits_set >= nr_cpu_ids)
 #if defined CONFIG_ES7000_CLUSTERED_APIC
@@ -237,11 +237,12 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * The cpus in the mask must all be on the apic cluster. If are not
          * on the same apicid cluster return default value of TARGET_CPUS.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask)
-                        apicid = cpu_to_logical_apicid(cpu);
+        cpu = cpumask_first_and(cpumask, andmask);
+        apicid = cpu_to_logical_apicid(cpu);
+
         while (cpus_found < num_bits_set) {
-                if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+                if (cpumask_test_cpu(cpu, cpumask) &&
+                                cpumask_test_cpu(cpu, andmask)) {
                         int new_apicid = cpu_to_logical_apicid(cpu);
                         if (apicid_cluster(apicid) !=
                                         apicid_cluster(new_apicid)) {
...
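The clustered variants (this hunk and the summit one further down) use the same accessor-style replacements for counting and membership: cpus_weight(*mask) becomes cpumask_weight(mask), cpu_isset(cpu, *mask) becomes cpumask_test_cpu(cpu, mask), and the first candidate comes from cpumask_first_and(). A rough sketch of walking the CPUs common to two masks with these accessors, assuming <linux/cpumask.h> and <linux/kernel.h>; the helper name is illustrative, not from the patch:

        #include <linux/kernel.h>
        #include <linux/cpumask.h>

        /* Illustrative only: count the CPUs set in both masks using the
         * struct cpumask accessors this patch switches to. */
        static unsigned int count_common_cpus(const struct cpumask *cpumask,
                                              const struct cpumask *andmask)
        {
                /* cpumask_weight() takes a pointer; no dereference to cpumask_t. */
                unsigned int limit = min(cpumask_weight(cpumask),
                                         cpumask_weight(andmask));
                unsigned int count = 0;
                unsigned int cpu;

                for (cpu = cpumask_first_and(cpumask, andmask);
                     cpu < nr_cpu_ids && count < limit;
                     cpu = cpumask_next_and(cpu, cpumask, andmask))
                        count++;        /* cpumask_test_cpu(cpu, ...) holds here */

                return count;
        }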
@@ -58,8 +58,8 @@ struct genapic {
         unsigned (*get_apic_id)(unsigned long x);
         unsigned long apic_id_mask;
         unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
-        unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
-                                               const cpumask_t *andmask);
+        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask);
         void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 #ifdef CONFIG_SMP
...
@@ -31,8 +31,8 @@ struct genapic {
         void (*send_IPI_self)(int vector);
         /* */
         unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
-        unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
-                                               const cpumask_t *andmask);
+        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask);
         unsigned int (*phys_pkg_id)(int index_msb);
         unsigned int (*get_apic_id)(unsigned long x);
         unsigned long (*set_apic_id)(unsigned int id);
...
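These two struct genapic hunks only retype the cpu_mask_to_apicid_and callback, so every APIC driver that fills in the table must now supply a function with the struct cpumask * signature. A hypothetical initializer, just to show how the fields line up; the driver and function names below are placeholders, not from the patch, and the struct definition is the one shown above:

        static unsigned int example_cpu_mask_to_apicid(const cpumask_t *cpumask);
        static unsigned int example_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                                           const struct cpumask *andmask);

        static struct genapic apic_example = {
                /* unchanged callback: still takes a cpumask_t pointer */
                .cpu_mask_to_apicid     = example_cpu_mask_to_apicid,
                /* retyped callback: now takes const struct cpumask * */
                .cpu_mask_to_apicid_and = example_cpu_mask_to_apicid_and,
        };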
@@ -67,11 +67,11 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
         return cpus_addr(*cpumask)[0];
 }
 
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask,
-                                              const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
-        unsigned long mask1 = cpus_addr(*cpumask)[0];
-        unsigned long mask2 = cpus_addr(*andmask)[0];
+        unsigned long mask1 = cpumask_bits(cpumask)[0];
+        unsigned long mask2 = cpumask_bits(andmask)[0];
 
         return (unsigned int)(mask1 & mask2);
 }
...
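For the flat/logical-delivery helpers (this hunk and flat_cpu_mask_to_apicid_and() further down), the mask itself is the destination bitmap, so the change swaps cpus_addr(*mask)[0] for cpumask_bits(mask)[0], i.e. word 0 of the bitmap behind the struct cpumask. A minimal sketch of that accessor, assuming <linux/cpumask.h>; the helper name is illustrative:

        #include <linux/cpumask.h>

        /* Illustrative only: AND the low words of two cpumasks, as the flat
         * cpu_mask_to_apicid_and() helpers do to build a logical destination. */
        static unsigned int low_word_and(const struct cpumask *cpumask,
                                         const struct cpumask *andmask)
        {
                /* cpumask_bits() exposes the unsigned long bitmap behind the mask. */
                unsigned long mask1 = cpumask_bits(cpumask)[0];
                unsigned long mask2 = cpumask_bits(andmask)[0];

                return (unsigned int)(mask1 & mask2);
        }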
@@ -127,8 +127,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
         return (int) 0xF;
 }
 
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         return (int) 0xF;
 }
...
@@ -170,8 +170,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
         return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         int num_bits_set;
         int num_bits_set2;
@@ -179,9 +179,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
         int cpu;
         int apicid = 0;
 
-        num_bits_set = cpus_weight(*cpumask);
-        num_bits_set2 = cpus_weight(*andmask);
-        num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+        num_bits_set = cpumask_weight(cpumask);
+        num_bits_set2 = cpumask_weight(andmask);
+        num_bits_set = min(num_bits_set, num_bits_set2);
         /* Return id to all */
         if (num_bits_set >= nr_cpu_ids)
                 return 0xFF;
@@ -189,11 +189,11 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * The cpus in the mask must all be on the apic cluster. If are not
          * on the same apicid cluster return default value of TARGET_CPUS.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask)
-                        apicid = cpu_to_logical_apicid(cpu);
+        cpu = cpumask_first_and(cpumask, andmask);
+        apicid = cpu_to_logical_apicid(cpu);
         while (cpus_found < num_bits_set) {
-                if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+                if (cpumask_test_cpu(cpu, cpumask)
+                    && cpumask_test_cpu(cpu, andmask)) {
                         int new_apicid = cpu_to_logical_apicid(cpu);
                         if (apicid_cluster(apicid) !=
                                         apicid_cluster(new_apicid)) {
...
@@ -158,13 +158,13 @@ static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
 }
 
-static unsigned int flat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                const cpumask_t *andmask)
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                const struct cpumask *andmask)
 {
-        unsigned long mask1 = cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
-        unsigned long mask2 = cpus_addr(*andmask)[0] & APIC_ALL_CPUS;
+        unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+        unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
 
-        return (int)(mask1 & mask2);
+        return mask1 & mask2;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -264,8 +264,9 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
-static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                    const cpumask_t *andmask)
+static unsigned int
+physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                const struct cpumask *andmask)
 {
         int cpu;
@@ -273,9 +274,9 @@ static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return per_cpu(x86_cpu_to_apicid, cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
 }
...
@@ -123,8 +123,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         int cpu;
@@ -132,9 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return per_cpu(x86_cpu_to_apicid, cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
 }
...
@@ -122,8 +122,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                                   const cpumask_t *andmask)
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                   const struct cpumask *andmask)
 {
         int cpu;
@@ -131,9 +131,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return per_cpu(x86_cpu_to_apicid, cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
 }
...
@@ -179,8 +179,8 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
         return BAD_APICID;
 }
 
-static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                               const cpumask_t *andmask)
+static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask)
 {
         int cpu;
@@ -188,9 +188,9 @@ static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
          * We're using fixed IRQ delivery, can only return one phys APIC ID.
          * May as well be the first.
          */
-        while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-                if (cpu_isset(cpu, *andmask))
-                        return per_cpu(x86_cpu_to_apicid, cpu);
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return per_cpu(x86_cpu_to_apicid, cpu);
         return BAD_APICID;
 }
...