Commit 21511abd authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] make pfm_get_task work with virtual pids
  [IA64] honor notify_die() returning NOTIFY_STOP
  [IA64] remove dead code: __cpu_{down,die} from !HOTPLUG_CPU
  [IA64] Appoint kvm/ia64 Maintainers
  [IA64] ia64_set_psr should use srlz.i
  [IA64] Export three symbols for module use
  [IA64] mca style cleanup
  [IA64] sn_hwperf semaphore to mutex
  [IA64] generalize attribute of fsyscall_gtod_data
  [IA64] efi.c Add /* never reached */ annotation
  [IA64] efi.c Spelling/punctuation fixes
  [IA64] Make efi.c mostly fit in 80 columns
  [IA64] aliasing-test: fix gcc warnings on non-ia64
  [IA64] Slim-down __clear_bit_unlock
  [IA64] Fix the order of atomic operations in restore_previous_kprobes on ia64
  [IA64] constify function pointer tables
  [IA64] fix userspace compile error in gcc_intrin.h
@@ -16,6 +16,7 @@
 #include <fcntl.h>
 #include <fnmatch.h>
 #include <string.h>
+#include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <unistd.h>
@@ -65,7 +66,7 @@ int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
 {
        struct dirent **namelist;
        char *name, *path2;
-       int i, n, r, rc, result = 0;
+       int i, n, r, rc = 0, result = 0;
        struct stat buf;

        n = scandir(path, &namelist, 0, alphasort);
@@ -113,7 +114,7 @@ int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
                free(namelist[i]);
        }
        free(namelist);
-       return rc;
+       return result;
 }

 char buf[1024];
@@ -149,7 +150,7 @@ int scan_rom(char *path, char *file)
 {
        struct dirent **namelist;
        char *name, *path2;
-       int i, n, r, rc, result = 0;
+       int i, n, r, rc = 0, result = 0;
        struct stat buf;

        n = scandir(path, &namelist, 0, alphasort);
@@ -180,7 +181,7 @@ int scan_rom(char *path, char *file)
         * important thing is that no MCA happened.
         */
        if (rc > 0)
-               fprintf(stderr, "PASS: %s read %ld bytes\n", path2, rc);
+               fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc);
        else {
                fprintf(stderr, "PASS: %s not readable\n", path2);
                return rc;
@@ -201,10 +202,10 @@ int scan_rom(char *path, char *file)
                free(namelist[i]);
        }
        free(namelist);
-       return rc;
+       return result;
 }

-int main()
+int main(void)
 {
        int rc;

@@ -256,4 +257,6 @@ int main()
        scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0);
        scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1);
        scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0);
+
+       return rc;
 }
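
A note on the aliasing-test fixes above: `rc` could be read before ever being assigned when no directory entries matched, and the functions accumulated per-entry status in `result` but then returned the scratch variable `rc`. A standalone userspace sketch of the fixed pattern (the `scan()` helper is hypothetical, not from the commit):

    #include <stdio.h>

    /* Mirrors the fixed shape of scan_tree()/scan_rom(): rc starts at 0 so
     * the "no entries" path returns something defined, and the accumulated
     * result -- not the per-entry scratch value -- is what gets returned. */
    static int scan(int nentries)
    {
            int i, rc = 0, result = 0;

            for (i = 0; i < nentries; i++) {
                    rc = i % 2;        /* per-entry status */
                    result |= rc;      /* accumulate */
            }
            return result;             /* was: return rc (wrong variable) */
    }

    int main(void)
    {
            printf("scan(0)=%d scan(5)=%d\n", scan(0), scan(5));
            return 0;
    }
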
@@ -2249,6 +2249,15 @@ L: kvm-devel@lists.sourceforge.net
 W:     kvm.sourceforge.net
 S:     Supported

+KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64)
+P:     Anthony Xu
+M:     anthony.xu@intel.com
+P:     Xiantao Zhang
+M:     xiantao.zhang@intel.com
+L:     kvm-ia64-devel@lists.sourceforge.net
+W:     kvm.sourceforge.net
+S:     Supported
+
 KEXEC
 P:     Eric Biederman
 M:     ebiederm@xmission.com
......
@@ -1875,7 +1875,7 @@ ioc_show(struct seq_file *s, void *v)
        return 0;
 }

-static struct seq_operations ioc_seq_ops = {
+static const struct seq_operations ioc_seq_ops = {
        .start = ioc_start,
        .next = ioc_next,
        .stop = ioc_stop,
......
@@ -27,7 +27,7 @@
 #include "ia32priv.h"

-extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
+extern int die_if_kernel (char *str, struct pt_regs *regs, long err);

 struct exec_domain ia32_exec_domain;
 struct page *ia32_shared_page[NR_CPUS];
@@ -217,7 +217,8 @@ ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
 {
        siginfo_t siginfo;

-       die_if_kernel("Bad IA-32 interrupt", regs, int_num);
+       if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
+               return;

        siginfo.si_signo = SIGTRAP;
        siginfo.si_errno = int_num;     /* XXX is it OK to abuse si_errno like this? */
......
(This diff is collapsed.)
@@ -14,10 +14,10 @@ struct fsyscall_gtod_data_t {
        u32             clk_shift;
        void            *clk_fsys_mmio;
        cycle_t         clk_cycle_last;
-} __attribute__ ((aligned (L1_CACHE_BYTES)));
+} ____cacheline_aligned;

 struct itc_jitter_data_t {
        int             itc_jitter;
        cycle_t         itc_lastcycle;
-} __attribute__ ((aligned (L1_CACHE_BYTES)));
+} ____cacheline_aligned;
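
For reference, `____cacheline_aligned` comes from `<linux/cache.h>` and expands (modulo the exact macro chain) to the same attribute the old code spelled out by hand, so the generated layout is unchanged; the cleanup just uses the generic kernel spelling instead of a hard-coded constant:

    /* include/linux/cache.h, abridged from kernels of this era: */
    #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
    /* On ia64, SMP_CACHE_BYTES is L1_CACHE_BYTES, so both spellings
     * produce identical alignment. */
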
@@ -12,6 +12,9 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(strlen);

+#include <asm/pgtable.h>
+EXPORT_SYMBOL_GPL(empty_zero_page);
+
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum);           /* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
......
@@ -381,9 +381,10 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
        unsigned int i;
-       i = atomic_sub_return(1, &kcb->prev_kprobe_index);
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp;
-       kcb->kprobe_status = kcb->prev_kprobe[i].status;
+       i = atomic_read(&kcb->prev_kprobe_index);
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp;
+       kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
+       atomic_sub(1, &kcb->prev_kprobe_index);
 }

 static void __kprobes set_current_kprobe(struct kprobe *p,
......
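
Why the ordering in restore_previous_kprobe() matters: `prev_kprobe_index` doubles as the marker for which save slots are live, so decrementing it *before* copying the saved state out opens a window in which a re-entrant probe hit on the same CPU could claim and overwrite the slot still being read. The fix reads first and releases last. A minimal sketch of the corrected pop (the slot type is hypothetical; the atomics are the kernel's):

    #include <asm/atomic.h>            /* atomic_t, atomic_read, atomic_sub */

    struct slot { int data; };          /* stand-in for prev_kprobe[] entries */

    struct save_stack {
            atomic_t top;               /* number of live slots */
            struct slot s[4];
    };

    static void pop(struct save_stack *st, struct slot *out)
    {
            int i = atomic_read(&st->top);  /* 1. locate the newest live slot */
            *out = st->s[i - 1];            /* 2. copy its contents out       */
            atomic_sub(1, &st->top);        /* 3. only now mark it free       */
    }
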
@@ -2,61 +2,69 @@
  * File:        mca.c
  * Purpose:     Generic MCA handling layer
  *
- * Updated for latest kernel
  * Copyright (C) 2003 Hewlett-Packard Co
  *      David Mosberger-Tang <davidm@hpl.hp.com>
  *
  * Copyright (C) 2002 Dell Inc.
- * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
+ * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
  *
  * Copyright (C) 2002 Intel
- * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
+ * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
  *
  * Copyright (C) 2001 Intel
- * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
+ * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
  *
  * Copyright (C) 2000 Intel
- * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
+ * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
  *
  * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  *
- * 03/04/15 D. Mosberger Added INIT backtrace support.
- * 02/03/25 M. Domsch   GUID cleanups
+ * Copyright (C) 2006 FUJITSU LIMITED
+ * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  *
- * 02/01/04 J. Hall     Aligned MCA stack to 16 bytes, added platform vs. CPU
- *                      error flag, set SAL default return values, changed
- *                      error record structure to linked list, added init call
- *                      to sal_get_state_info_size().
+ * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
+ *            Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
+ *            added min save state dump, added INIT handler.
  *
- * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
- *                      platform errors, completed code for logging of
- *                      corrected & uncorrected machine check errors, and
- *                      updated for conformance with Nov. 2000 revision of the
- *                      SAL 3.0 spec.
- * 00/03/29 C. Fleckenstein    Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
- *                      added min save state dump, added INIT handler.
+ * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
+ *            Added setup of CMCI and CPEI IRQs, logging of corrected platform
+ *            errors, completed code for logging of corrected & uncorrected
+ *            machine check errors, and updated for conformance with Nov. 2000
+ *            revision of the SAL 3.0 spec.
+ *
+ * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
+ *            Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
+ *            set SAL default return values, changed error record structure to
+ *            linked list, added init call to sal_get_state_info_size().
+ *
+ * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
+ *            GUID cleanups.
+ *
+ * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
+ *            Added INIT backtrace support.
  *
  * 2003-12-08 Keith Owens <kaos@sgi.com>
- *            smp_call_function() must not be called from interrupt context (can
- *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
+ *            smp_call_function() must not be called from interrupt context
+ *            (can deadlock on tasklist_lock).
+ *            Use keventd to call smp_call_function().
  *
  * 2004-02-01 Keith Owens <kaos@sgi.com>
  *            Avoid deadlock when using printk() for MCA and INIT records.
- *            Delete all record printing code, moved to salinfo_decode in user space.
- *            Mark variables and functions static where possible.
- *            Delete dead variables and functions.
- *            Reorder to remove the need for forward declarations and to consolidate
- *            related code.
+ *            Delete all record printing code, moved to salinfo_decode in user
+ *            space.  Mark variables and functions static where possible.
+ *            Delete dead variables and functions.  Reorder to remove the need
+ *            for forward declarations and to consolidate related code.
  *
  * 2005-08-12 Keith Owens <kaos@sgi.com>
- *            Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
+ *            Convert MCA/INIT handlers to use per event stacks and SAL/OS
+ *            state.
  *
  * 2005-10-07 Keith Owens <kaos@sgi.com>
  *            Add notify_die() hooks.
  *
  * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  *            Add printing support for MCA/INIT.
  *
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *            Support multiple cpus going through OS_MCA in the same event.
......
-//
-// assembly portion of the IA64 MCA handling
-//
-// Mods by cfleck to integrate into kernel build
-// 00/03/15 davidm Added various stop bits to get a clean compile
-//
-// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
-//                 kstack, switch modes, jump to C INIT handler
-//
-// 02/01/04 J.Hall <jenna.s.hall@intel.com>
-//                 Before entering virtual mode code:
-//                 1. Check for TLB CPU error
-//                 2. Restore current thread pointer to kr6
-//                 3. Move stack ptr 16 bytes to conform to C calling convention
-//
-// 04/11/12 Russ Anderson <rja@sgi.com>
-//                 Added per cpu MCA/INIT stack save areas.
-//
-// 12/08/05 Keith Owens <kaos@sgi.com>
-//                 Use per cpu MCA/INIT stacks for all data.
-//
+/*
+ * File:        mca_asm.S
+ * Purpose:     assembly portion of the IA64 MCA handling
+ *
+ * Mods by cfleck to integrate into kernel build
+ *
+ * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
+ *            Added various stop bits to get a clean compile
+ *
+ * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
+ *            Added code to save INIT handoff state in pt_regs format,
+ *            switch to temp kstack, switch modes, jump to C INIT handler
+ *
+ * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
+ *            Before entering virtual mode code:
+ *            1. Check for TLB CPU error
+ *            2. Restore current thread pointer to kr6
+ *            3. Move stack ptr 16 bytes to conform to C calling convention
+ *
+ * 2004-11-12 Russ Anderson <rja@sgi.com>
+ *            Added per cpu MCA/INIT stack save areas.
+ *
+ * 2005-12-08 Keith Owens <kaos@sgi.com>
+ *            Use per cpu MCA/INIT stacks for all data.
+ */
 #include <linux/threads.h>
 #include <asm/asmmacro.h>
......
@@ -3,7 +3,7 @@
  * Purpose:     Generic MCA handling layer
  *
  * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  * Copyright (C) 2005 Silicon Graphics, Inc
  * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
......
@@ -3,7 +3,7 @@
  * Purpose:     Define helpers for Generic MCA handling
  *
  * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  */

 /*
  * Processor error section:
......
@@ -3,7 +3,7 @@
  * Purpose:     Assembly portion of Generic MCA handling
  *
  * Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  */

 #include <linux/threads.h>
......
@@ -2654,11 +2654,11 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
        /* XXX: need to add more checks here */
        if (pid < 2) return -EPERM;

-       if (pid != current->pid) {
+       if (pid != task_pid_vnr(current)) {

                read_lock(&tasklist_lock);

-               p = find_task_by_pid(pid);
+               p = find_task_by_vpid(pid);

                /* make sure task cannot go away while we operate on it */
                if (p) get_task_struct(p);
@@ -5795,7 +5795,7 @@ pfm_proc_show(struct seq_file *m, void *v)
        return 0;
 }

-struct seq_operations pfm_seq_ops = {
+const struct seq_operations pfm_seq_ops = {
        .start = pfm_proc_start,
        .next = pfm_proc_next,
        .stop = pfm_proc_stop,
......
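
Context for the pfm_get_task change above: with pid namespaces, a pid number passed in from user space is only meaningful relative to the caller's namespace, so the raw `current->pid` comparison and `find_task_by_pid()` lookup become the namespace-relative `task_pid_vnr(current)` and `find_task_by_vpid()`. A sketch of the resulting lookup pattern (kernel context assumed; the wrapper function is hypothetical):

    #include <linux/sched.h>    /* find_task_by_vpid(), tasklist_lock */

    static struct task_struct *get_task_by_vpid(pid_t pid)
    {
            struct task_struct *p;

            read_lock(&tasklist_lock);
            p = find_task_by_vpid(pid); /* resolves pid in the caller's namespace */
            if (p)
                    get_task_struct(p); /* pin it before dropping the lock */
            read_unlock(&tasklist_lock);
            return p;                   /* caller must put_task_struct() */
    }
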
@@ -284,6 +284,7 @@ ia64_sal_cache_flush (u64 cache_type)
        SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
        return isrv.status;
 }
+EXPORT_SYMBOL_GPL(ia64_sal_cache_flush);

 void __init
 ia64_sal_init (struct ia64_sal_systab *systab)
@@ -372,3 +373,16 @@ ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
        return 0;
 }
 EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
+
+long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+                    unsigned long *drift_info)
+{
+       struct ia64_sal_retval isrv;
+
+       SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
+       *ticks_per_second = isrv.v0;
+       *drift_info = isrv.v1;
+       return isrv.status;
+}
+EXPORT_SYMBOL_GPL(ia64_sal_freq_base);
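
These exports (`ia64_sal_cache_flush`, `empty_zero_page` earlier, and `ia64_sal_freq_base`) back the "Export three symbols for module use" item in the shortlog, with kvm/ia64 as the intended consumer. A hedged sketch of a module-side call (the demo module is hypothetical; the `SAL_FREQ_BASE_*` selectors are real constants from `<asm/sal.h>`):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <asm/sal.h>

    static int __init freq_demo_init(void)
    {
            unsigned long tps, drift;

            /* GPL-only export, so the module must be GPL-licensed. */
            if (ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &tps, &drift) == 0)
                    printk(KERN_INFO "platform base frequency: %lu ticks/s\n", tps);
            return 0;
    }
    module_init(freq_demo_init);
    MODULE_LICENSE("GPL");
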
@@ -654,7 +654,7 @@ c_stop (struct seq_file *m, void *v)
 {
 }

-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
......
@@ -767,17 +767,6 @@ void __cpu_die(unsigned int cpu)
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
-#else /* !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-       return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-       /* We said "no" in __cpu_disable */
-       BUG();
-}
 #endif /* CONFIG_HOTPLUG_CPU */

 void
......
@@ -35,7 +35,7 @@ trap_init (void)
                fpswa_interface = __va(ia64_boot_param->fpswa);
 }

-void
+int
 die (const char *str, struct pt_regs *regs, long err)
 {
        static struct {
@@ -62,8 +62,11 @@ die (const char *str, struct pt_regs *regs, long err)
        if (++die.lock_owner_depth < 3) {
                printk("%s[%d]: %s %ld [%d]\n",
                        current->comm, task_pid_nr(current), str, err, ++die_counter);
-               (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
-               show_regs(regs);
+               if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
+                   != NOTIFY_STOP)
+                       show_regs(regs);
+               else
+                       regs = NULL;
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");
@@ -72,17 +75,22 @@ die (const char *str, struct pt_regs *regs, long err)
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die.lock);

+       if (!regs)
+               return 1;
+
        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(SIGSEGV);
+       return 0;
 }

-void
+int
 die_if_kernel (char *str, struct pt_regs *regs, long err)
 {
        if (!user_mode(regs))
-               die(str, regs, err);
+               return die(str, regs, err);
+       return 0;
 }

 void
@@ -102,7 +110,8 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
                if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
-               die_if_kernel("bugcheck!", regs, break_num);
+               if (die_if_kernel("bugcheck!", regs, break_num))
+                       return;
                sig = SIGILL; code = ILL_ILLOPC;
                break;
@@ -155,8 +164,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
                break;

              default:
-               if (break_num < 0x40000 || break_num > 0x100000)
-                       die_if_kernel("Bad break", regs, break_num);
+               if ((break_num < 0x40000 || break_num > 0x100000)
+                   && die_if_kernel("Bad break", regs, break_num))
+                       return;

                if (break_num < 0x80000) {
                        sig = SIGILL; code = __ILL_BREAK;
@@ -402,14 +412,15 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
 #endif

        sprintf(buf, "IA-64 Illegal operation fault");
-       die_if_kernel(buf, &regs, 0);
+       rv.fkt = 0;
+       if (die_if_kernel(buf, &regs, 0))
+               return rv;

        memset(&si, 0, sizeof(si));
        si.si_signo = SIGILL;
        si.si_code = ILL_ILLOPC;
        si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
        force_sig_info(SIGILL, &si, current);
-       rv.fkt = 0;
        return rv;
 }
@@ -644,6 +655,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
                sprintf(buf, "Fault %lu", vector);
                break;
        }
-       die_if_kernel(buf, &regs, error);
-       force_sig(SIGILL, current);
+       if (!die_if_kernel(buf, &regs, error))
+               force_sig(SIGILL, current);
 }
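
The thread running through all of these traps.c changes: `die()` now reports whether a `notify_die()` hook returned NOTIFY_STOP (a kernel debugger claiming the fault), and every `die_if_kernel()` caller returns early instead of falling through to signal delivery when it did. A stripped-down, standalone sketch of that contract (illustrative only, not the kernel's actual code):

    #include <stdio.h>

    enum { NOTIFY_DONE, NOTIFY_STOP };

    static int hook_result = NOTIFY_STOP;  /* stand-in for a notify_die() chain */

    static int die_sketch(void)
    {
            if (hook_result == NOTIFY_STOP)
                    return 1;       /* debugger took it: caller must not signal */
            /* ... oops output, taint, do_exit() would go here ... */
            return 0;
    }

    static void fault_handler(void)
    {
            if (die_sketch())
                    return;         /* honor NOTIFY_STOP */
            printf("deliver SIGILL as before\n");
    }

    int main(void)
    {
            fault_handler();        /* prints nothing: the hook stopped the die */
            hook_result = NOTIFY_DONE;
            fault_handler();        /* now the signal path runs */
            return 0;
    }
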
@@ -23,7 +23,7 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>

-extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
+extern int die_if_kernel(char *str, struct pt_regs *regs, long err);

 #undef DEBUG_UNALIGNED_TRAP
@@ -675,8 +675,9 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
         */
        if (ld.x6_op == 1 || ld.x6_op == 3) {
                printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
-               die_if_kernel("unaligned reference on speculative load with register update\n",
-                             regs, 30);
+               if (die_if_kernel("unaligned reference on speculative load with register update\n",
+                                 regs, 30))
+                       return;
        }
@@ -1317,7 +1318,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
        if (ia64_psr(regs)->be) {
                /* we don't support big-endian accesses */
-               die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
+               if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0))
+                       return;
                goto force_sigbus;
        }
@@ -1534,7 +1536,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
                        ia64_handle_exception(regs, eh);
                        goto done;
                }
-               die_if_kernel("error during unaligned kernel access\n", regs, ret);
+               if (die_if_kernel("error during unaligned kernel access\n", regs, ret))
+                       return;
                /* NOT_REACHED */
        }
  force_sigbus:
......
@@ -16,7 +16,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>

-extern void die (char *, struct pt_regs *, long);
+extern int die(char *, struct pt_regs *, long);

 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -267,9 +267,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
-       die("Oops", regs, isr);
+       if (die("Oops", regs, isr))
+               regs = NULL;
        bust_spinlocks(0);
-       do_exit(SIGKILL);
+       if (regs)
+               do_exit(SIGKILL);
        return;

  out_of_memory:
......
@@ -523,7 +523,7 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
        return count;
 }

-static struct seq_operations sn2_ptc_seq_ops = {
+static const struct seq_operations sn2_ptc_seq_ops = {
        .start = sn2_ptc_seq_start,
        .next = sn2_ptc_seq_next,
        .stop = sn2_ptc_seq_stop,
......
@@ -33,6 +33,7 @@
 #include <linux/smp_lock.h>
 #include <linux/nodemask.h>
 #include <linux/smp.h>
+#include <linux/mutex.h>

 #include <asm/processor.h>
 #include <asm/topology.h>
@@ -50,7 +51,7 @@ static void *sn_hwperf_salheap = NULL;
 static int sn_hwperf_obj_cnt = 0;
 static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
 static int sn_hwperf_init(void);
-static DECLARE_MUTEX(sn_hwperf_init_mutex);
+static DEFINE_MUTEX(sn_hwperf_init_mutex);

 #define cnode_possible(n)      ((n) < num_cnodes)
@@ -577,7 +578,7 @@ static void sn_topology_stop(struct seq_file *m, void *v)
 /*
  * /proc/sgi_sn/sn_topology, read-only using seq_file
  */
-static struct seq_operations sn_topology_seq_ops = {
+static const struct seq_operations sn_topology_seq_ops = {
        .start = sn_topology_start,
        .next = sn_topology_next,
        .stop = sn_topology_stop,
@@ -884,10 +885,10 @@ static int sn_hwperf_init(void)
        int e = 0;

        /* single threaded, once-only initialization */
-       down(&sn_hwperf_init_mutex);
+       mutex_lock(&sn_hwperf_init_mutex);

        if (sn_hwperf_salheap) {
-               up(&sn_hwperf_init_mutex);
+               mutex_unlock(&sn_hwperf_init_mutex);
                return e;
        }
@@ -936,7 +937,7 @@ static int sn_hwperf_init(void)
                sn_hwperf_salheap = NULL;
                sn_hwperf_obj_cnt = 0;
        }
-       up(&sn_hwperf_init_mutex);
+       mutex_unlock(&sn_hwperf_init_mutex);
        return e;
 }
......
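
The semaphore-to-mutex conversion above is the standard mechanical one: a `DECLARE_MUTEX()` binary semaphore used only for mutual exclusion maps one-to-one onto `DEFINE_MUTEX()`, gaining lockdep validation and a cheaper fast path. The shape of the pattern (the `do_setup()` helper is hypothetical):

    #include <linux/mutex.h>
    #include <linux/errno.h>

    static DEFINE_MUTEX(init_mutex);    /* was: static DECLARE_MUTEX(init_mutex); */
    static void *state;

    extern void *do_setup(void);        /* hypothetical once-only initializer */

    static int init_once(void)
    {
            mutex_lock(&init_mutex);    /* was: down(&init_mutex); */
            if (!state)
                    state = do_setup();
            mutex_unlock(&init_mutex);  /* was: up(&init_mutex);   */
            return state ? 0 : -ENOMEM;
    }
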
@@ -122,38 +122,40 @@ clear_bit_unlock (int nr, volatile void *addr)
 }

 /**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
  *
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
  * with release semantics. See also __raw_spin_unlock().
  */
 static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
 {
-       __u32 mask, new;
-       volatile __u32 *m;
-
-       m = (volatile __u32 *)addr + (nr >> 5);
-       mask = ~(1 << (nr & 31));
-       new = *m & mask;
-       barrier();
+       __u32 * const m = (__u32 *) addr + (nr >> 5);
+       __u32 const new = *m & ~(1 << (nr & 31));
+
        ia64_st4_rel_nta(m, new);
 }

 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
  */
 static __inline__ void
 __clear_bit (int nr, volatile void *addr)
 {
-       volatile __u32 *p = (__u32 *) addr + (nr >> 5);
-       __u32 m = 1 << (nr & 31);
-       *p &= ~m;
+       *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }

 /**
  * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
  * @addr: Address to start counting from
  *
  * change_bit() is atomic and may not be reordered.
@@ -178,7 +180,7 @@ change_bit (int nr, volatile void *addr)
 /**
  * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
@@ -197,7 +199,7 @@ __change_bit (int nr, volatile void *addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
@@ -247,11 +249,11 @@ __test_and_set_bit (int nr, volatile void *addr)
 /**
  * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
@@ -272,7 +274,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 /**
  * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
@@ -292,11 +294,11 @@ __test_and_clear_bit(int nr, volatile void * addr)
 /**
  * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
@@ -315,8 +317,12 @@ test_and_change_bit (int nr, volatile void *addr)
        return (old & bit) != 0;
 }

-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
  */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
......
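
What the slimmed-down `__clear_bit_unlock` depends on: `ia64_st4_rel_nta()` emits a single `st4.rel.nta`, a plain store with release semantics, so every store inside the critical section is ordered before the bit clears and no `barrier()` or read-modify-write is needed on the unlock side. Paired with the acquire semantics the comments above document for `test_and_set_bit()`, that gives a bit-spinlock. A sketch under the assumption of kernel context (illustrative, not the kernel's bit_spin_lock):

    #include <linux/bitops.h>   /* test_and_set_bit(), __clear_bit_unlock() */
    #include <asm/processor.h>  /* cpu_relax() */

    static unsigned long lockword;
    static int shared_data;

    static void locked_store(int v)
    {
            while (test_and_set_bit(0, &lockword)) /* acquire on success */
                    cpu_relax();

            shared_data = v;                       /* critical section */

            /* Release: the store to shared_data is globally visible
             * before bit 0 clears; on ia64 this is one st4.rel.nta.
             * Non-atomic is safe here because the lock holder is the
             * only possible writer of the lock word. */
            __clear_bit_unlock(0, &lockword);
    }
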
@@ -24,7 +24,9 @@
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);

+#ifdef __KERNEL__
 register unsigned long ia64_r13 asm ("r13") __used;
+#endif

 #define ia64_setreg(regnum, val)                                               \
 ({                                                                             \
......
@@ -3,9 +3,9 @@
  * Purpose:     Machine check handling specific defines
  *
  * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
  */

 #ifndef _ASM_IA64_MCA_H
......
 /*
  * File:        mca_asm.h
+ * Purpose:     Machine check handling specific defines
  *
  * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
  * Copyright (C) 2000 Hewlett-Packard Co.
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
......
@@ -473,7 +473,7 @@ ia64_set_psr (__u64 psr)
 {
        ia64_stop();
        ia64_setreg(_IA64_REG_PSR_L, psr);
-       ia64_srlz_d();
+       ia64_srlz_i();
 }

 /*
......
@@ -649,17 +649,6 @@ typedef struct err_rec {
  * Now define a couple of inline functions for improved type checking
  * and convenience.
  */
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
-                    unsigned long *drift_info)
-{
-       struct ia64_sal_retval isrv;
-
-       SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-       *ticks_per_second = isrv.v0;
-       *drift_info = isrv.v1;
-       return isrv.status;
-}
-
 extern s64 ia64_sal_cache_flush (u64 cache_type);
 extern void __init check_sal_cache_flush (void);
@@ -841,6 +830,9 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
                                   u64, u64, u64, u64, u64);
 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
                                      u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+                   unsigned long *drift_info);

 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * System Abstraction Layer Specification
......