Commit 38cb162b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] wire up pselect, ppoll
  [IA64] Add TIF_RESTORE_SIGMASK
  [IA64] unwind did not work for processes born with CLONE_STOPPED
  [IA64] Optional method to purge the TLB on SN systems
  [IA64] SPIN_LOCK_UNLOCKED macro cleanup in arch/ia64
  [IA64-SN2][KJ] mmtimer.c-kzalloc
  [IA64] fix stack alignment for ia32 signal handlers
  [IA64] - Altix: hotplug after intr redirect can crash system
  [IA64] save and restore cpus_allowed in cpu_idle_wait
  [IA64] Removal of percpu TR cleanup in kexec code
  [IA64] Fix some section mismatch errors
@@ -52,43 +52,6 @@ ENTRY(ia32_clone)
 	br.ret.sptk.many rp
 END(ia32_clone)
 
-ENTRY(sys32_rt_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs
-	mov loc0=rp
-	mov out0=in0			// mask
-	mov out1=in1			// sigsetsize
-	mov out2=sp			// out2 = &sigscratch
-	.fframe 16
-	adds sp=-16,sp			// allocate dummy "sigscratch"
-	;;
-	.body
-	br.call.sptk.many rp=ia32_rt_sigsuspend
-1:	.restore sp
-	adds sp=16,sp
-	mov rp=loc0
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys32_rt_sigsuspend)
-
-ENTRY(sys32_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs
-	mov loc0=rp
-	mov out0=in2			// mask (first two args are ignored)
-	;;
-	mov out1=sp			// out1 = &sigscratch
-	.fframe 16
-	adds sp=-16,sp			// allocate dummy "sigscratch"
-	.body
-	br.call.sptk.many rp=ia32_sigsuspend
-1:	.restore sp
-	adds sp=16,sp
-	mov rp=loc0
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys32_sigsuspend)
-
 GLOBAL_ENTRY(ia32_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
 {	/*
@@ -389,7 +352,7 @@ ia32_syscall_table:
 	data8 sys_rt_sigpending
 	data8 compat_sys_rt_sigtimedwait
 	data8 sys32_rt_sigqueueinfo
-	data8 sys32_rt_sigsuspend
+	data8 compat_sys_rt_sigsuspend
 	data8 sys32_pread	/* 180 */
 	data8 sys32_pwrite
 	data8 sys_chown	/* 16-bit version */
......
@@ -451,59 +451,20 @@ sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int r
 	sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
 }
 
-long
-__ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr)
+asmlinkage long
+sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
 {
-	extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall);
-	sigset_t oldset, set;
-
-	scr->scratch_unat = 0;	/* avoid leaking kernel bits to user level */
-	memset(&set, 0, sizeof(set));
-
-	memcpy(&set.sig, &sset->sig, sigsetsize);
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-
+	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
-	{
-		oldset = current->blocked;
-		current->blocked = set;
-		recalc_sigpending();
-	}
+	current->saved_sigmask = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
-
-	/*
-	 * The return below usually returns to the signal handler.  We need to pre-set the
-	 * correct error code here to ensure that the right values get saved in sigcontext
-	 * by ia64_do_signal.
-	 */
-	scr->pt.r8 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (ia64_do_signal(&oldset, scr, 1))
-			return -EINTR;
-	}
-}
-
-asmlinkage long
-ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr)
-{
-	compat_sigset_t set;
-
-	if (sigsetsize > sizeof(compat_sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&set.sig, &uset->sig, sigsetsize))
-		return -EFAULT;
-
-	return __ia32_rt_sigsuspend(&set, sigsetsize, scr);
-}
-
-asmlinkage long
-ia32_sigsuspend (unsigned int mask, struct sigscratch *scr)
-{
-	return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr);
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_thread_flag(TIF_RESTORE_SIGMASK);
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage long
@@ -810,7 +771,11 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 	}
 	/* Legacy stack switching not supported */
 
-	return (void __user *)((esp - frame_size) & -8ul);
+	esp -= frame_size;
+	/* Align the stack pointer according to the i386 ABI,
+	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+	esp = ((esp + 4) & -16ul) - 4;
+	return (void __user *) esp;
 }
 
 static int
......
@@ -1199,32 +1199,6 @@ ENTRY(notify_resume_user)
 	br.ret.sptk.many rp
 END(notify_resume_user)
 
-GLOBAL_ENTRY(sys_rt_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs in case of syscall restart!
-	mov r9=ar.unat
-	mov loc0=rp			// save return address
-	mov out0=in0			// mask
-	mov out1=in1			// sigsetsize
-	adds out2=8,sp			// out2=&sigscratch->ar_pfs
-	;;
-	.fframe 16
-	.spillsp ar.unat, 16
-	st8 [sp]=r9,-16			// allocate space for ar.unat and save it
-	st8 [out2]=loc1,-8		// save ar.pfs, out2=&sigscratch
-	.body
-	br.call.sptk.many rp=ia64_rt_sigsuspend
-.ret17:	.restore sp
-	adds sp=16,sp			// pop scratch stack space
-	;;
-	ld8 r9=[sp]			// load new unat from sw->caller_unat
-	mov rp=loc0
-	;;
-	mov ar.unat=r9
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys_rt_sigsuspend)
-
 ENTRY(sys_rt_sigreturn)
 	PT_REGS_UNWIND_INFO(0)
 	/*
@@ -1598,8 +1572,8 @@ sys_call_table:
 	data8 sys_readlinkat
 	data8 sys_fchmodat
 	data8 sys_faccessat
-	data8 sys_ni_syscall		// reserved for pselect
-	data8 sys_ni_syscall		// 1295 reserved for ppoll
+	data8 sys_pselect6
+	data8 sys_ppoll
 	data8 sys_unshare
 	data8 sys_splice
 	data8 sys_set_robust_list
......
@@ -1012,7 +1012,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 /*
  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
  */
-void __init
+void __devinit
 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 			  unsigned long polarity,
 			  unsigned long trigger)
......
@@ -38,6 +38,7 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
@@ -126,8 +127,10 @@ void destroy_irq(unsigned int irq)
 
 #ifdef CONFIG_SMP
 # define IS_RESCHEDULE(vec)		(vec == IA64_IPI_RESCHEDULE)
+# define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
 #else
 # define IS_RESCHEDULE(vec)		(0)
+# define IS_LOCAL_TLB_FLUSH(vec)	(0)
 #endif
 /*
  * That's where the IVT branches when we get an external
@@ -179,8 +182,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -226,8 +232,11 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
@@ -259,12 +268,12 @@ void ia64_process_pending_intr(void)
 #ifdef CONFIG_SMP
-	extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
+extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -277,6 +286,13 @@ static struct irqaction resched_irqaction = {
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
+
+static struct irqaction tlb_irqaction = {
+	.handler =	dummy_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"tlb_flush"
+};
+
 #endif
 
 void
@@ -302,6 +318,7 @@ init_IRQ (void)
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
......
@@ -155,7 +155,7 @@ show_regs (struct pt_regs *regs)
 }
 
 void
-do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
 	if (fsys_mode(current, &scr->pt)) {
 		/* defer signal-handling etc. until we return to privilege-level 0.  */
@@ -170,8 +170,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
 #endif
 
 	/* deal with pending signal delivery */
-	if (test_thread_flag(TIF_SIGPENDING))
-		ia64_do_signal(oldset, scr, in_syscall);
+	if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
+		ia64_do_signal(scr, in_syscall);
 }
 
 static int pal_halt = 1;
@@ -236,6 +236,7 @@ void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
 	cpumask_t map;
+	cpumask_t tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -257,6 +258,7 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+	set_cpus_allowed(current, tmp);
 }
 
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
......
@@ -94,7 +94,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 4:
 	srlz.i
 	;;
-	//purge TR entry for kernel text and data
+	// purge TR entry for kernel text and data
 	movl r16=KERNEL_START
 	mov r18=KERNEL_TR_PAGE_SHIFT<<2
 	;;
@@ -104,15 +104,6 @@ GLOBAL_ENTRY(relocate_new_kernel)
 	srlz.i
 	;;
 
-	// purge TR entry for percpu data
-	movl r16=PERCPU_ADDR
-	mov r18=PERCPU_PAGE_SHIFT<<2
-	;;
-	ptr.d r16,r18
-	;;
-	srlz.d
-	;;
-
 	// purge TR entry for pal code
 	mov r16=in3
 	mov r18=IA64_GRANULE_SHIFT<<2
......
@@ -786,7 +786,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 }
 
-void
+void __init
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
......
@@ -22,4 +22,4 @@ struct sigframe {
 	struct sigcontext sc;
 };
 
-extern long ia64_do_signal (sigset_t *, struct sigscratch *, long);
+extern void ia64_do_signal (struct sigscratch *, long);
@@ -40,47 +40,6 @@
 # define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
 #endif
 
-long
-ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
-{
-	sigset_t oldset, set;
-
-	/* XXX: Don't preclude handling different sized sigset_t's.  */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_READ, uset, sigsetsize))
-		return -EFAULT;
-	if (GET_SIGSET(&set, uset))
-		return -EFAULT;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	{
-		oldset = current->blocked;
-		current->blocked = set;
-		recalc_sigpending();
-	}
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/*
-	 * The return below usually returns to the signal handler.  We need to
-	 * pre-set the correct error code here to ensure that the right values
-	 * get saved in sigcontext by ia64_do_signal.
-	 */
-	scr->pt.r8 = EINTR;
-	scr->pt.r10 = -1;
-
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (ia64_do_signal(&oldset, scr, 1))
-			return -EINTR;
-	}
-}
-
 asmlinkage long
 sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
 		 long arg3, long arg4, long arg5, long arg6, long arg7,
@@ -477,10 +436,11 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
  * Note that `init' is a special process: it doesn't get signals it doesn't want to
  * handle.  Thus you cannot kill init even with a SIGKILL even by mistake.
  */
-long
-ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
+void
+ia64_do_signal (struct sigscratch *scr, long in_syscall)
 {
 	struct k_sigaction ka;
+	sigset_t *oldset;
 	siginfo_t info;
 	long restart = in_syscall;
 	long errno = scr->pt.r8;
@@ -492,9 +452,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 	 * doing anything if so.
 	 */
 	if (!user_mode(&scr->pt))
-		return 0;
+		return;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	/*
@@ -557,8 +519,15 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
 		 * continue to iterate in this loop so we can deliver the SIGSEGV...
 		 */
-		if (handle_signal(signr, &ka, &info, oldset, scr))
-			return 1;
+		if (handle_signal(signr, &ka, &info, oldset, scr)) {
+			/* a signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+			return;
+		}
 	}
 
 	/* Did we come from a system call? */
@@ -584,5 +553,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 			}
 		}
 	}
-	return 0;
+
+	/* if there's no signal to deliver, we just put the saved sigmask
+	 * back */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
@@ -49,6 +49,18 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cacheline.
+ */
+static struct local_tlb_flush_counts {
+	unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preeemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+	/*
+	 * Use atomic ops. Otherwise, the load/increment/store sequence from
+	 * a "++" operation can have the line stolen between the load & store.
+	 * The overhead of the atomic op in negligible in this case & offers
+	 * significant benefit for the brief periods where lots of cpus
+	 * are simultaneously flushing TLBs.
+	 */
+	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+	local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+	cpumask_t cpumask = xcpumask;
+	int mycpu, cpu, flush_mycpu = 0;
+
+	preempt_disable();
+	mycpu = smp_processor_id();
+
+	for_each_cpu_mask(cpu, cpumask)
+		counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+	mb();
+	for_each_cpu_mask(cpu, cpumask) {
+		if (cpu == mycpu)
+			flush_mycpu = 1;
+		else
+			smp_send_local_flush_tlb(cpu);
+	}
+
+	if (flush_mycpu)
+		smp_local_flush_tlb();
+
+	for_each_cpu_mask(cpu, cpumask)
+		while(counts[cpu] == local_tlb_flush_counts[cpu].count)
+			udelay(FLUSH_DELAY);
+
+	preempt_enable();
+}
+
 void
 smp_flush_tlb_all (void)
 {
......
@@ -43,9 +43,9 @@ die (const char *str, struct pt_regs *regs, long err)
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock =	SPIN_LOCK_UNLOCKED,
+		.lock =	__SPIN_LOCK_UNLOCKED(die.lock),
 		.lock_owner = -1,
 		.lock_owner_depth = 0
 	};
 	static int die_counter;
 	int cpu = get_cpu();
......
@@ -60,6 +60,7 @@
 # define UNW_DEBUG_ON(n)	unw_debug_level >= n
 	/* Do not code a printk level, not all debug lines end in newline */
 # define UNW_DPRINT(n, ...)	if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
+# undef inline
 # define inline
 #else /* !UNW_DEBUG */
 # define UNW_DEBUG_ON(n)  0
@@ -145,7 +146,7 @@ static struct {
 # endif
 } unw = {
 	.tables = &unw.kernel_table,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
 	.save_order = {
 		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
 		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
@@ -1943,9 +1944,9 @@ EXPORT_SYMBOL(unw_unwind);
 int
 unw_unwind_to_user (struct unw_frame_info *info)
 {
-	unsigned long ip, sp, pr = 0;
+	unsigned long ip, sp, pr = info->pr;
 
-	while (unw_unwind(info) >= 0) {
+	do {
 		unw_get_sp(info, &sp);
 		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
 		    < IA64_PT_REGS_SIZE) {
@@ -1963,7 +1964,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
 			   __FUNCTION__, ip);
 			return -1;
 		}
-	}
+	} while (unw_unwind(info) >= 0);
 	unw_get_ip(info, &ip);
 	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
 		   __FUNCTION__, ip);
......
@@ -32,9 +32,9 @@ static struct {
 } purge;
 
 struct ia64_ctx ia64_ctx = {
-	.lock =		SPIN_LOCK_UNLOCKED,
+	.lock =		__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
 	.next =		1,
 	.max_ctx =	~0U
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
......
@@ -59,6 +59,22 @@ void sn_intr_free(nasid_t local_nasid, int local_widget,
 			(u64) sn_irq_info->irq_cookie, 0, 0);
 }
 
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+		     struct sn_irq_info *sn_irq_info,
+		     nasid_t req_nasid, int req_slice)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
+			(u64) local_widget, __pa(sn_irq_info),
+			(u64) req_nasid, (u64) req_slice, 0);
+
+	return ret_stuff.status;
+}
+
 static unsigned int sn_startup_irq(unsigned int irq)
 {
 	return 0;
@@ -127,15 +143,8 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 	struct sn_irq_info *new_irq_info;
 	struct sn_pcibus_provider *pci_provider;
 
-	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-	if (new_irq_info == NULL)
-		return NULL;
-
-	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-	bridge = (u64) new_irq_info->irq_bridge;
+	bridge = (u64) sn_irq_info->irq_bridge;
 	if (!bridge) {
-		kfree(new_irq_info);
 		return NULL; /* irq is not a device interrupt */
 	}
@@ -145,8 +154,25 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		local_widget = TIO_SWIN_WIDGETNUM(bridge);
 	else
 		local_widget = SWIN_WIDGETNUM(bridge);
-
 	vector = sn_irq_info->irq_irq;
+
+	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
+	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
+	if (!status) {
+		new_irq_info = sn_irq_info;
+		goto finish_up;
+	}
+
+	/*
+	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
+	 * Revert to old method.
+	 */
+	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+	if (new_irq_info == NULL)
+		return NULL;
+
+	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
 	/* Free the old PROM new_irq_info structure */
 	sn_intr_free(local_nasid, local_widget, new_irq_info);
 	unregister_intr_pda(new_irq_info);
@@ -162,11 +188,18 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		return NULL;
 	}
 
+	register_intr_pda(new_irq_info);
+	spin_lock(&sn_irq_info_lock);
+	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+	spin_unlock(&sn_irq_info_lock);
+	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+
+finish_up:
 	/* Update kernels new_irq_info with new target info */
 	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
 				     new_irq_info->irq_slice);
 	new_irq_info->irq_cpuid = cpuid;
-	register_intr_pda(new_irq_info);
 
 	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
@@ -178,11 +211,6 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 	    pci_provider && pci_provider->target_interrupt)
 		(pci_provider->target_interrupt)(new_irq_info);
 
-	spin_lock(&sn_irq_info_lock);
-	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-	spin_unlock(&sn_irq_info_lock);
-	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-
 #ifdef CONFIG_SMP
 	cpuphys = cpu_physical_id(cpuid);
 	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
......
@@ -46,6 +46,9 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static  __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
 extern unsigned long
 sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
 			       volatile unsigned long *, unsigned long,
@@ -76,6 +79,8 @@ struct ptc_stats {
 	unsigned long shub_itc_clocks;
 	unsigned long shub_itc_clocks_max;
 	unsigned long shub_ptc_flushes_not_my_mm;
+	unsigned long shub_ipi_flushes;
+	unsigned long shub_ipi_flushes_itc_clocks;
 };
 
 #define sn2_ptctest	0
@@ -121,6 +126,18 @@ void sn_tlb_migrate_finish(struct mm_struct *mm)
 		flush_tlb_mm(mm);
 }
 
+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+	unsigned long itc;
+
+	itc = ia64_get_itc();
+	smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+	itc = ia64_get_itc() - itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
+	__get_cpu_var(ptcstats).shub_ipi_flushes++;
+}
+
 /**
  * sn2_global_tlb_purge - globally purge translation cache of virtual address range
  * @mm: mm_struct containing virtual address range
@@ -154,7 +171,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
 	short nasids[MAX_NUMNODES], nix;
 	nodemask_t nodes_flushed;
-	int active, max_active, deadlock;
+	int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+	if (flush_opt > 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		return;
+	}
 
 	nodes_clear(nodes_flushed);
 	i = 0;
@@ -189,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		return;
 	}
 
+	if (flush_opt == 2) {
+		sn2_ipi_flush_all_tlb(mm);
+		preempt_enable();
+		return;
+	}
+
 	itc = ia64_get_itc();
 	nix = 0;
 	for_each_node_mask(cnode, nodes_flushed)
@@ -256,6 +284,8 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		}
 		if (active >= max_active || i == (nix - 1)) {
 			if ((deadlock = wait_piowc())) {
+				if (flush_opt == 1)
+					goto done;
 				sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
 				if (reset_max_active_on_deadlock())
 					max_active = 1;
@@ -267,6 +297,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		start += (1UL << nbits);
 	} while (start < end);
 
+done:
 	itc2 = ia64_get_itc() - itc2;
 	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
 	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
@@ -279,6 +310,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 
 	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
 
+	if (flush_opt == 1 && deadlock) {
+		__get_cpu_var(ptcstats).deadlocks++;
+		sn2_ipi_flush_all_tlb(mm);
+	}
+
 	preempt_enable();
 }
@@ -425,24 +461,42 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 
 	if (!cpu) {
 		seq_printf(file,
-			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
-		seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+			   "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
+		seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
 	}
 
 	if (cpu < NR_CPUS && cpu_online(cpu)) {
 		stat = &per_cpu(ptcstats, cpu);
-		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 			   stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 			   stat->deadlocks,
 			   1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
 			   stat->shub_ptc_flushes_not_my_mm,
-			   stat->deadlocks2);
+			   stat->deadlocks2,
+			   stat->shub_ipi_flushes,
+			   1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
 
+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+	int cpu;
+	char optstr[64];
+
+	if (copy_from_user(optstr, user, count))
+		return -EFAULT;
+	optstr[count - 1] = '\0';
+	sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+	for_each_online_cpu(cpu)
+		memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+	return count;
+}
+
 static struct seq_operations sn2_ptc_seq_ops = {
 	.start = sn2_ptc_seq_start,
 	.next = sn2_ptc_seq_next,
@@ -458,6 +512,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
 static const struct file_operations proc_sn2_ptc_operations = {
 	.open = sn2_ptc_proc_open,
 	.read = seq_read,
+	.write = sn2_ptc_proc_write,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
......
@@ -705,15 +705,13 @@ static int __init mmtimer_init(void)
 	maxn++;
 
 	/* Allocate list of node ptrs to mmtimer_t's */
-	timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
+	timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
 	if (timers == NULL) {
 		printk(KERN_ERR "%s: failed to allocate memory for device\n",
 				MMTIMER_NAME);
 		goto out3;
 	}
-	memset(timers,0,(sizeof(mmtimer_t *)*maxn));
-
 	/* Allocate mmtimer_t's for each online node */
 	for_each_online_node(node) {
 		timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
......
@@ -66,6 +66,7 @@ extern int ia64_last_device_vector;
 #define IA64_PERFMON_VECTOR		0xee	/* performanc monitor interrupt vector */
 #define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
 #define	IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_LOCAL_TLB_FLUSH	0xfc	/* SMP flush local TLB */
 #define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
 #define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */
......
@@ -83,7 +83,7 @@ extern int gsi_to_irq (unsigned int gsi);
 extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
 				  unsigned long trigger);
 extern void iosapic_unregister_intr (unsigned int irq);
-extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
+extern void __devinit iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 				 unsigned long polarity,
 				 unsigned long trigger);
 extern int __init iosapic_register_platform_intr (u32 int_type,
......
@@ -106,6 +106,7 @@
 /* interrupt handling */
 #define SAL_INTR_ALLOC		1
 #define SAL_INTR_FREE		2
+#define SAL_INTR_REDIRECT	3
 
 /*
  * operations available on the generic SN_SAL_SYSCTL_OP
......
@@ -85,6 +85,7 @@ struct thread_info {
 #define TIF_SYSCALL_TRACE	3	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
 #define TIF_SINGLESTEP		5	/* restore singlestep on return to user mode */
+#define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
@@ -96,6 +97,7 @@ struct thread_info {
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
@@ -104,7 +106,7 @@ struct thread_info {
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_RESTORE_SIGMASK)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
......
@@ -27,9 +27,11 @@ extern void local_flush_tlb_all (void);
 #ifdef CONFIG_SMP
   extern void smp_flush_tlb_all (void);
   extern void smp_flush_tlb_mm (struct mm_struct *mm);
+  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
 # define flush_tlb_all()	smp_flush_tlb_all()
 #else
 # define flush_tlb_all()	local_flush_tlb_all()
+# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
 #endif
 
 static inline void
@@ -94,6 +96,15 @@ flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end
 	 */
 }
 
+/*
+ * Flush the local TLB.  Invoked from another cpu using an IPI.
+ */
+#ifdef CONFIG_SMP
+void smp_local_flush_tlb(void);
+#else
+#define smp_local_flush_tlb()
+#endif
+
 #define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
 
 #endif /* _ASM_IA64_TLBFLUSH_H */
@@ -283,7 +283,8 @@
 #define __NR_readlinkat			1291
 #define __NR_fchmodat			1292
 #define __NR_faccessat			1293
-/* 1294, 1295 reserved for pselect/ppoll */
+#define __NR_pselect6			1294
+#define __NR_ppoll			1295
 #define __NR_unshare			1296
 #define __NR_splice			1297
 #define __NR_set_robust_list		1298
@@ -300,6 +301,7 @@
 #define NR_syscalls			281 /* length of syscall table */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #ifdef CONFIG_IA32_SUPPORT
 # define __ARCH_WANT_SYS_FADVISE64
@@ -310,6 +312,7 @@
 # define __ARCH_WANT_SYS_OLDUMOUNT
 # define __ARCH_WANT_SYS_SIGPENDING
 # define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 # define __ARCH_WANT_COMPAT_SYS_TIME
 #endif
......