提交 e5bc8b6b 编写于 作者: A Andi Kleen 提交者: Linus Torvalds

[PATCH] x86-64: Make remote TLB flush more scalable

Instead of using a global spinlock to protect the state
of the remote TLB flush use a lock and state for each sending CPU.

To tell the receiver where to look for the state use 8 different
call vectors.  Each CPU uses a specific vector to trigger flushes on other
CPUs. Depending on the received vector the target CPUs look into
the right per cpu variable for the flush data.

When the system has more than 8 CPUs they are hashed to the 8 available
vectors. The limited global vector space forces us to this right now.
In future when interrupts are split into per CPU domains this could be
fixed, at the cost of needing more IPIs in flat mode.

Also some minor cleanup in the smp flush code and remove some outdated
debug code.

Requires patch to move cpu_possible_map setup earlier.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 83b942bd
...@@ -536,8 +536,19 @@ ENTRY(thermal_interrupt) ...@@ -536,8 +536,19 @@ ENTRY(thermal_interrupt)
ENTRY(reschedule_interrupt) ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
ENTRY(invalidate_interrupt) .macro INVALIDATE_ENTRY num
apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
.endm
INVALIDATE_ENTRY 0
INVALIDATE_ENTRY 1
INVALIDATE_ENTRY 2
INVALIDATE_ENTRY 3
INVALIDATE_ENTRY 4
INVALIDATE_ENTRY 5
INVALIDATE_ENTRY 6
INVALIDATE_ENTRY 7
ENTRY(call_function_interrupt) ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
......
...@@ -486,7 +486,14 @@ void spurious_interrupt(void); ...@@ -486,7 +486,14 @@ void spurious_interrupt(void);
void error_interrupt(void); void error_interrupt(void);
void reschedule_interrupt(void); void reschedule_interrupt(void);
void call_function_interrupt(void); void call_function_interrupt(void);
void invalidate_interrupt(void); void invalidate_interrupt0(void);
void invalidate_interrupt1(void);
void invalidate_interrupt2(void);
void invalidate_interrupt3(void);
void invalidate_interrupt4(void);
void invalidate_interrupt5(void);
void invalidate_interrupt6(void);
void invalidate_interrupt7(void);
void thermal_interrupt(void); void thermal_interrupt(void);
void i8254_timer_resume(void); void i8254_timer_resume(void);
...@@ -562,8 +569,15 @@ void __init init_IRQ(void) ...@@ -562,8 +569,15 @@ void __init init_IRQ(void)
*/ */
set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPI for invalidation */ /* IPIs for invalidation */
set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); set_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
set_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
/* IPI for generic function call */ /* IPI for generic function call */
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#define __cpuinit __init
/* /*
* Smarter SMP flushing macros. * Smarter SMP flushing macros.
* c/o Linus Torvalds. * c/o Linus Torvalds.
...@@ -37,19 +39,41 @@ ...@@ -37,19 +39,41 @@
* writing to user space from interrupts. (Its not allowed anyway). * writing to user space from interrupts. (Its not allowed anyway).
* *
* Optimizations Manfred Spraul <manfred@colorfullife.com> * Optimizations Manfred Spraul <manfred@colorfullife.com>
*
* More scalable flush, from Andi Kleen
*
* To avoid global state use 8 different call vectors.
* Each CPU uses a specific vector to trigger flushes on other
* CPUs. Depending on the received vector the target CPUs look into
* the right per cpu variable for the flush data.
*
* With more than 8 CPUs they are hashed to the 8 available
* vectors. The limited global vector space forces us to this right now.
* In future when interrupts are split into per CPU domains this could be
* fixed, at the cost of triggering multiple IPIs in some cases.
*/ */
static cpumask_t flush_cpumask; union smp_flush_state {
static struct mm_struct * flush_mm; struct {
static unsigned long flush_va; cpumask_t flush_cpumask;
static DEFINE_SPINLOCK(tlbstate_lock); struct mm_struct *flush_mm;
unsigned long flush_va;
#define FLUSH_ALL -1ULL #define FLUSH_ALL -1ULL
spinlock_t tlbstate_lock;
};
char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per CPU data section, but padded
to a full cache line because other CPUs can access it and we don't
want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
/* /*
* We cannot call mmdrop() because we are in interrupt context, * We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask. * instead update mm->cpu_vm_mask.
*/ */
static inline void leave_mm (unsigned long cpu) static inline void leave_mm(int cpu)
{ {
if (read_pda(mmu_state) == TLBSTATE_OK) if (read_pda(mmu_state) == TLBSTATE_OK)
BUG(); BUG();
...@@ -101,15 +125,25 @@ static inline void leave_mm (unsigned long cpu) ...@@ -101,15 +125,25 @@ static inline void leave_mm (unsigned long cpu)
* *
* 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
* 2) Leave the mm if we are in the lazy tlb mode. * 2) Leave the mm if we are in the lazy tlb mode.
*
* Interrupts are disabled.
*/ */
asmlinkage void smp_invalidate_interrupt (void) asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{ {
unsigned long cpu; int cpu;
int sender;
union smp_flush_state *f;
cpu = get_cpu(); cpu = smp_processor_id();
/*
* orig_rax contains the interrupt vector - 256.
* Use that to determine where the sender put the data.
*/
sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, flush_cpumask)) if (!cpu_isset(cpu, f->flush_cpumask))
goto out; goto out;
/* /*
* This was a BUG() but until someone can quote me the * This was a BUG() but until someone can quote me the
...@@ -120,65 +154,64 @@ asmlinkage void smp_invalidate_interrupt (void) ...@@ -120,65 +154,64 @@ asmlinkage void smp_invalidate_interrupt (void)
* BUG(); * BUG();
*/ */
if (flush_mm == read_pda(active_mm)) { if (f->flush_mm == read_pda(active_mm)) {
if (read_pda(mmu_state) == TLBSTATE_OK) { if (read_pda(mmu_state) == TLBSTATE_OK) {
if (flush_va == FLUSH_ALL) if (f->flush_va == FLUSH_ALL)
local_flush_tlb(); local_flush_tlb();
else else
__flush_tlb_one(flush_va); __flush_tlb_one(f->flush_va);
} else } else
leave_mm(cpu); leave_mm(cpu);
} }
out: out:
ack_APIC_irq(); ack_APIC_irq();
cpu_clear(cpu, flush_cpumask); cpu_clear(cpu, f->flush_cpumask);
put_cpu_no_resched();
} }
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va) unsigned long va)
{ {
cpumask_t tmp; int sender;
/* union smp_flush_state *f;
* A couple of (to be removed) sanity checks:
*
* - we do not send IPIs to not-yet booted CPUs.
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpus_empty(cpumask));
cpus_and(tmp, cpumask, cpu_online_map);
BUG_ON(!cpus_equal(tmp, cpumask));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
if (!mm)
BUG();
/* /* Caller has disabled preemption */
* I'm not happy about this global shared spinlock in the sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
* MM hot path, but we'll see how contended it is. f = &per_cpu(flush_state, sender);
* Temporarily this turns IRQs off, so that lockups are
* detected by the NMI watchdog.
*/
spin_lock(&tlbstate_lock);
flush_mm = mm; /* Could avoid this lock when
flush_va = va; num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
cpus_or(flush_cpumask, cpumask, flush_cpumask); probably not worth checking this for a cache-hot lock. */
spin_lock(&f->tlbstate_lock);
f->flush_mm = mm;
f->flush_va = va;
cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
/* /*
* We have to send the IPI only to * We have to send the IPI only to
* CPUs affected. * CPUs affected.
*/ */
send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
while (!cpus_empty(flush_cpumask)) while (!cpus_empty(f->flush_cpumask))
mb(); /* nothing. lockup detection does not belong here */; cpu_relax();
flush_mm = NULL; f->flush_mm = NULL;
flush_va = 0; f->flush_va = 0;
spin_unlock(&tlbstate_lock); spin_unlock(&f->tlbstate_lock);
} }
int __cpuinit init_smp_flush(void)
{
int i;
for_each_cpu_mask(i, cpu_possible_map) {
spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
}
return 0;
}
core_initcall(init_smp_flush);
void flush_tlb_current_task(void) void flush_tlb_current_task(void)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
......
...@@ -50,14 +50,15 @@ struct hw_interrupt_type; ...@@ -50,14 +50,15 @@ struct hw_interrupt_type;
*/ */
#define SPURIOUS_APIC_VECTOR 0xff #define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe #define ERROR_APIC_VECTOR 0xfe
#define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfd
#define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfc
#define TASK_MIGRATION_VECTOR 0xfb #define KDB_VECTOR 0xfb /* reserved for KDB */
#define CALL_FUNCTION_VECTOR 0xfa #define THERMAL_APIC_VECTOR 0xfa
#define KDB_VECTOR 0xf9 /* 0xf9 free */
#define INVALIDATE_TLB_VECTOR_END 0xf8
#define THERMAL_APIC_VECTOR 0xf0 #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
/* /*
* Local APIC timer IRQ vector is on a different priority level, * Local APIC timer IRQ vector is on a different priority level,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册