Commit 16da2f93 authored by Thomas Gleixner, committed by Ingo Molnar

x86: smp_64.c: remove unused exports and cleanup while at it

The exports are not used anywhere. There was never any reason to introduce them in the first place.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 081e10b9
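For context on the commit message, the short sketch below (not from this commit; the helper name is invented) shows what an EXPORT_SYMBOL() line controls. The export only makes a symbol visible to loadable modules; built-in kernel code can still call the function after the export is removed, which is why dropping exports that no module uses is safe.

/*
 * Hedged sketch, not from this commit: typical EXPORT_SYMBOL() usage.
 * The helper name below is made up for illustration only.
 */
#include <linux/module.h>

void example_tlb_helper(void)
{
	/* work visible to the rest of the kernel */
}
EXPORT_SYMBOL(example_tlb_helper);	/* deleting this line only affects loadable modules */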
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);
	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);
	f->flush_mm = mm;
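To make the comment in the hunk above concrete, here is a small userspace analogy (pthreads, not kernel code; all names are invented). Each sender hashes to one of a fixed number of slots, and each slot has its own lock, like per_cpu(flush_state, sender).tlbstate_lock. If there were at least as many slots as online CPUs, every sender would own a private slot and the lock could never be contended; it is kept anyway because an uncontended, cache-hot lock costs almost nothing.

#include <pthread.h>
#include <stdio.h>

#define NUM_SLOTS 8			/* stands in for NUM_INVALIDATE_TLB_VECTORS */

struct flush_slot {
	pthread_mutex_t lock;		/* stands in for tlbstate_lock */
	const void *pending_mm;		/* stands in for f->flush_mm */
};

static struct flush_slot slots[NUM_SLOTS];

/* Post a flush request into the slot owned by this sender id. */
static void post_flush(int sender_id, const void *mm)
{
	struct flush_slot *f = &slots[sender_id % NUM_SLOTS];

	pthread_mutex_lock(&f->lock);	/* only contended if two senders share a slot */
	f->pending_mm = mm;
	/* the real code keeps the lock held until the flush IPI is acknowledged */
	pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	for (int i = 0; i < NUM_SLOTS; i++)
		pthread_mutex_init(&slots[i].lock, NULL);

	post_flush(3, "demo mm");
	printf("slot %d holds %s\n", 3 % NUM_SLOTS,
	       (const char *)slots[3 % NUM_SLOTS].pending_mm);
	return 0;
}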
@@ -202,12 +204,12 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
int __cpuinit init_smp_flush(void)
{
	int i;
	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}
core_initcall(init_smp_flush);
void flush_tlb_current_task(void)
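As a side note on the core_initcall() line above, the hedged sketch below (name invented, not from this commit) shows the general initcall pattern: the registered function runs once during boot, at the core initcall level, which is ordered before the later levels such as device_initcall, so one-time setup like spin_lock_init() on per-CPU state is done before the code that depends on it can run.

/*
 * Hedged sketch, not from this commit: the generic core_initcall() pattern.
 */
#include <linux/init.h>

static int __init example_early_setup(void)	/* hypothetical name */
{
	/* one-time initialization goes here */
	return 0;				/* 0 means success */
}
core_initcall(example_early_setup);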
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
	flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm)
{
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@ -268,7 +268,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
@@ -325,9 +324,7 @@ void unlock_ipi_call_lock(void)
 * this function sends a 'generic call function' IPI to all other CPU
 * of the system defined in the mask.
 */
static int
__smp_call_function_mask(cpumask_t mask,
static int __smp_call_function_mask(cpumask_t mask,
				    void (*func)(void *), void *info,
				    int wait)
{
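Conceptually, the function described by the comment above walks the CPU mask and triggers the callback on every CPU whose bit is set, via an IPI. The runnable userspace sketch below (plain C, invented names, no kernel APIs) only illustrates the mask-walking idea; it calls the function locally instead of sending an interrupt.

#include <stdio.h>

typedef unsigned long cpumask_demo_t;		/* toy stand-in for cpumask_t */

/* Invoke func(info) once per set bit, the way the IPI path targets CPUs. */
static void call_function_mask_demo(cpumask_demo_t mask,
				    void (*func)(void *), void *info)
{
	for (unsigned int cpu = 0; cpu < 8 * sizeof(mask); cpu++)
		if (mask & (1UL << cpu))
			func(info);		/* the real code sends an IPI here */
}

static void report(void *info)
{
	(void)info;
	puts("would run on one target CPU");
}

int main(void)
{
	call_function_mask_demo(0x0bUL, report, NULL);	/* bits 0, 1 and 3 set */
	return 0;
}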
@@ -420,8 +417,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
	int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int ret;
	int me = get_cpu();
	int ret, me = get_cpu();
	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());
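The two comments in this last hunk carry the key constraints on smp_call_function_single(): the caller pins itself to its current CPU with get_cpu() so it cannot be preempted and migrated mid-call, and interrupts must be enabled because the call may spin until the target CPU services the IPI, so two CPUs cross-calling each other with IRQs off would deadlock; that is what WARN_ON(irqs_disabled()) catches. A hedged usage sketch with the five-argument signature shown above (the callback name is invented, not from this commit):

/*
 * Hedged sketch, not from this commit: a typical caller of the
 * smp_call_function_single() variant shown in the hunk above.
 */
#include <linux/smp.h>

static void poke_remote(void *info)	/* runs on the target CPU, in IPI context */
{
	(void)info;
}

static int kick_cpu(int cpu)
{
	/* interrupts must be enabled here; wait=1 spins until the callback has run */
	return smp_call_function_single(cpu, poke_remote, NULL, 0, 1);
}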