Commit 29a41e9e authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6:
  parisc: export length of os_hpmc vector
  parisc: fix kernel crash (protection id trap) when compiling ruby1.9
  parisc: Use DEFINE_SPINLOCK
  parisc: add uevent helper for parisc bus
  parisc: fix ipv6 checksum
  parisc: quiet palo not-found message from "which"
  parisc: Replace NR_CPUS in parisc code
  parisc: trivial fixes
  parisc: fix braino in commit adding __space_to_prot
  parisc: factor out sid to protid conversion
  parisc: use leX_to_cpu in place of __fswabX
  parisc: fix GFP_KERNEL use while atomic in unwinder
  parisc: remove dead BIO_VMERGE_BOUNDARY and BIO_VMERGE_MAX_SIZE definitions
  parisc: set_time() catch errors
  parisc: use the new byteorder headers
  parisc: drivers/parisc/: make code static
  parisc: lib/: make code static
......@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
PALO := $(shell if which palo; then : ; \
PALO := $(shell if (which palo 2>&1); then : ; \
elif [ -x /sbin/palo ]; then echo /sbin/palo; \
fi)
......
......@@ -4,9 +4,10 @@
#include <asm/types.h>
#include <linux/compiler.h>
#ifdef __GNUC__
#define __BIG_ENDIAN
#define __SWAB_64_THRU_32__
static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
"shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
......@@ -14,8 +15,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
: "0" (x));
return x;
}
#define __arch_swab16 __arch_swab16
static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
{
__asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
"dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
......@@ -25,7 +27,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
return x;
}
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
unsigned int temp;
__asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
......@@ -35,7 +37,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
: "0" (x));
return x;
}
#define __arch_swab32 __arch_swab32
#if BITS_PER_LONG > 32
/*
......@@ -48,7 +50,8 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
** HSHR 67452301 -> *6*4*2*0 into %0
** OR %0 | %1 -> 76543210 into %0 (all done!)
*/
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__u64 temp;
__asm__("permh,3210 %0, %0\n\t"
"hshl %0, 8, %1\n\t"
......@@ -58,25 +61,9 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
: "0" (x));
return x;
}
#define __arch__swab64(x) ___arch__swab64(x)
#define __BYTEORDER_HAS_U64__
#elif !defined(__STRICT_ANSI__)
static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
__u32 t1 = ___arch__swab32((__u32) x);
__u32 t2 = ___arch__swab32((__u32) (x >> 32));
return (((__u64) t1 << 32) | t2);
}
#define __arch__swab64(x) ___arch__swab64(x)
#define __BYTEORDER_HAS_U64__
#endif
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab24(x) ___arch__swab24(x)
#define __arch__swab32(x) ___arch__swab32(x)
#endif /* __GNUC__ */
#define __arch_swab64 __arch_swab64
#endif /* BITS_PER_LONG > 32 */
#include <linux/byteorder/big_endian.h>
#include <linux/byteorder.h>
#endif /* _PARISC_BYTEORDER_H */
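
Note: the byteorder.h hunk above is the "use the new byteorder headers" change from the shortlog. Each optimized swab routine becomes a plain static inline named __arch_swabNN, and the self-referential #define __arch_swabNN __arch_swabNN advertises it, so the generic header only falls back to a portable implementation when no arch version exists. A minimal user-space sketch of that override pattern (my_arch_swab16/my_swab16 are invented names, not the kernel's):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t my_arch_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));	/* stands in for the shd/dep asm */
}
#define my_arch_swab16 my_arch_swab16		/* advertise the arch override */

static inline uint16_t my_swab16(uint16_t x)
{
#ifdef my_arch_swab16
	return my_arch_swab16(x);		/* arch-optimized path taken here */
#else
	return (uint16_t)((x << 8) | (x >> 8));	/* generic fallback */
#endif
}

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234u, (unsigned)my_swab16(0x1234));	/* 0x1234 -> 0x3412 */
	return 0;
}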
......@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
: "r19", "r20", "r21", "r22");
: "r19", "r20", "r21", "r22", "memory");
return csum_fold(sum);
}
......
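
Note: the csum_ipv6_magic hunk adds a "memory" clobber because the asm dereferences the saddr/daddr operands behind the compiler's back; the clobber acts as a compiler barrier, so stores to those buffers are not deferred past the asm and later loads are not served from stale registers. A small generic illustration (compiles with GCC/Clang on any target; names are invented):

static inline void compiler_barrier(void)
{
	asm volatile("" ::: "memory");	/* empty asm body, same "memory" clobber */
}

static unsigned int demo(unsigned int *buf)
{
	buf[0] = 1;		/* must actually reach memory before any asm
				 * that reads buf behind the compiler's back */
	compiler_barrier();
	return buf[0];		/* reloaded after the barrier, not cached */
}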
......@@ -4,12 +4,6 @@
#include <linux/types.h>
#include <asm/pgtable.h>
extern unsigned long parisc_vmerge_boundary;
extern unsigned long parisc_vmerge_max_size;
#define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary
#define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
......@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
/* readb can never be const, so use __fswab instead of le*_to_cpu */
#define readb(addr) __raw_readb(addr)
#define readw(addr) __fswab16(__raw_readw(addr))
#define readl(addr) __fswab32(__raw_readl(addr))
#define readq(addr) __fswab64(__raw_readq(addr))
#define readw(addr) le16_to_cpu(__raw_readw(addr))
#define readl(addr) le32_to_cpu(__raw_readl(addr))
#define readq(addr) le64_to_cpu(__raw_readq(addr))
#define writeb(b, addr) __raw_writeb(b, addr)
#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
......
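
Note: with the byteorder rework, readw/readl/readq above byte-swap through the public leX_to_cpu helpers instead of the internal __fswabX ones; the result on this big-endian CPU is unchanged (MMIO registers on these buses are little-endian), only the spelling moves to the supported API. A hypothetical driver fragment using the accessor (MYDEV_STATUS is a made-up register offset):

#include <linux/io.h>

#define MYDEV_STATUS	0x04	/* assumed 16-bit little-endian device register */

static u16 mydev_read_status(void __iomem *base)
{
	return readw(base + MYDEV_STATUS);	/* value returned in CPU byte order */
}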
......@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm)
mm->context = 0;
}
static inline void load_context(mm_context_t context)
static inline unsigned long __space_to_prot(mm_context_t context)
{
mtsp(context, 3);
#if SPACEID_SHIFT == 0
mtctl(context << 1,8);
return context << 1;
#else
mtctl(context >> (SPACEID_SHIFT - 1),8);
return context >> (SPACEID_SHIFT - 1);
#endif
}
static inline void load_context(mm_context_t context)
{
mtsp(context, 3);
mtctl(__space_to_prot(context), 8);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
......
......@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/percpu.h>
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
......@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
};
extern struct system_cpuinfo_parisc boot_cpu_data;
extern struct cpuinfo_parisc cpu_data[NR_CPUS];
#define current_cpu_data cpu_data[smp_processor_id()]
DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
......
......@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
int fixup_exception(struct pt_regs *regs);
#endif /* __PARISC_UACCESS_H */
......@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
static ssize_t make_modalias(struct device *dev, char *buf)
{
const struct parisc_device *padev = to_parisc_device(dev);
const struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
}
static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct parisc_device *padev;
char modalias[40];
if (!dev)
return -ENODEV;
padev = to_parisc_device(dev);
if (!padev)
return -ENODEV;
if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
return -ENOMEM;
make_modalias(dev, modalias);
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
......@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct parisc_device *padev = to_parisc_device(dev);
struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
return make_modalias(dev, buf);
}
static struct device_attribute parisc_device_attrs[] = {
......@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.uevent = parisc_uevent,
.dev_attrs = parisc_device_attrs,
.probe = parisc_driver_probe,
.remove = parisc_driver_remove,
......
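
Note: the new uevent callback hands PARISC_NAME and MODALIAS to udev, so user space can autoload a module whose device table expands to a matching "parisc:t...hv...rev...sv..." alias. A user-space sketch of the string this emits, using invented ID values:

#include <stdio.h>

int main(void)
{
	unsigned char hw_type = 0x0a, hversion_rev = 0x00;	/* hypothetical IDs */
	unsigned short hversion = 0x0004;
	unsigned int sversion = 0x0009d;

	/* same format string as make_modalias() above */
	printf("MODALIAS=parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
	       hw_type, hversion, hversion_rev, sversion);
	return 0;
}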
......@@ -80,6 +80,7 @@ END(hpmc_pim_data)
.import intr_save, code
ENTRY(os_hpmc)
.os_hpmc:
/*
* registers modified:
......@@ -295,5 +296,10 @@ os_hpmc_6:
b .
nop
ENDPROC(os_hpmc)
ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */
.os_hpmc_end:
nop
.data
.align 4
.export os_hpmc_size
os_hpmc_size:
.word .os_hpmc_end-.os_hpmc
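
Note: the exported os_hpmc_size word gives the C side (check_ivt(), further down in this merge) the handler length without exporting both the start and end labels. A sketch of the general assembler-exports-a-word, C-reads-a-symbol pattern, with invented names:

/*
 *	.data
 *	.align 4
 *	.export my_blob_size
 * my_blob_size:
 *	.word	.my_blob_end - .my_blob_start	(difference folded at assembly time)
 */
extern unsigned int my_blob_size;	/* C sees the .word as a plain object */

static unsigned int my_blob_len(void)
{
	return my_blob_size;
}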
......@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif
return cpu_data[cpu].txn_addr;
return per_cpu(cpu_data, cpu).txn_addr;
}
......@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
!cpu_online(next_cpu)))
while ((next_cpu < NR_CPUS) &&
(!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= NR_CPUS)
......@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
cpu_data[cpu].hpa);
per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
......@@ -421,5 +422,5 @@ void __init init_IRQ(void)
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ %d\n", irq);
printk(KERN_WARNING "unexpected IRQ %d\n", irq);
}
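
Note: the per_cpu(cpu_data, cpu) accesses above come from the "Replace NR_CPUS in parisc code" change: the old cpu_data[NR_CPUS] array becomes a per-CPU variable, so static data is no longer sized for the NR_CPUS worst case. A minimal sketch of the conversion pattern with invented names (2.6-era per-cpu API):

#include <linux/percpu.h>

struct foo { unsigned long txn_addr; };

static DEFINE_PER_CPU(struct foo, my_data);	/* was: static struct foo my_data[NR_CPUS]; */

static unsigned long foo_txn_addr(int cpu)
{
	return per_cpu(my_data, cpu).txn_addr;	/* was: my_data[cpu].txn_addr */
}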
......@@ -52,7 +52,7 @@
#include <linux/tty.h>
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
static DEFINE_SPINLOCK(pdc_console_lock);
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
......
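
Note: SPIN_LOCK_UNLOCKED as a static initializer was deprecated (among other things it cannot give each lock its own lockdep class), hence the switch to DEFINE_SPINLOCK above. Sketch of the pattern with an invented lock name:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_console_lock);	/* replaces: spinlock_t x = SPIN_LOCK_UNLOCKED; */

static void my_console_write(const char *s, unsigned int count)
{
	unsigned long flags;

	spin_lock_irqsave(&my_console_lock, flags);
	/* ... push s[0..count) to the output device ... */
	spin_unlock_irqrestore(&my_console_lock, flags);
}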
......@@ -541,9 +541,9 @@ static int __init perf_init(void)
spin_lock_init(&perf_lock);
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
cpu_device = cpu_data[0].dev;
cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
cpu_data[0].dev->name);
per_cpu(cpu_data, 0).dev->name);
return 0;
}
......
......@@ -3,7 +3,7 @@
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
* Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
......@@ -46,7 +46,7 @@
struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
extern int update_cr16_clocksource(void); /* from time.c */
......@@ -68,6 +68,23 @@ extern int update_cr16_clocksource(void); /* from time.c */
** The initialization of OS data structures is the same (done below).
*/
/**
* init_cpu_profiler - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
static void __cpuinit
init_percpu_prof(unsigned long cpunum)
{
struct cpuinfo_parisc *p;
p = &per_cpu(cpu_data, cpunum);
p->prof_counter = 1;
p->prof_multiplier = 1;
}
/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
......@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
}
#endif
p = &cpu_data[cpuid];
p = &per_cpu(cpu_data, cpuid);
boot_cpu_data.cpu_count++;
/* initialize counters - CPU 0 gets it_value set in time_init() */
......@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
** for boot_cpu by the above memset().
** for boot_cpu by the above memset().
*/
/* stolen from init_percpu_prof() */
cpu_data[cpuid].prof_counter = 1;
cpu_data[cpuid].prof_multiplier = 1;
init_percpu_prof(cpuid);
#endif
/*
......@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
}
/**
* init_cpu_profiler - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
static inline void __init
init_percpu_prof(int cpunum)
{
cpu_data[cpunum].prof_counter = 1;
cpu_data[cpunum].prof_multiplier = 1;
}
/**
* init_per_cpu - Handle individual processor initializations.
......@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
*
* o Enable CPU profiling hooks.
*/
int __init init_per_cpu(int cpunum)
int __cpuinit init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
......@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum)
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
cpu_data[cpunum].fp_rev = coproc_cfg.revision;
cpu_data[cpunum].fp_model = coproc_cfg.model;
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
......@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum)
int
show_cpuinfo (struct seq_file *m, void *v)
{
int n;
unsigned long cpu;
for(n=0; n<boot_cpu_data.cpu_count; n++) {
for_each_online_cpu(cpu) {
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
if (0 == cpu_data[n].hpa)
if (0 == cpuinfo->hpa)
continue;
#endif
seq_printf(m, "processor\t: %d\n"
seq_printf(m, "processor\t: %lu\n"
"cpu family\t: PA-RISC %s\n",
n, boot_cpu_data.family_name);
cpu, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
......@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name,
cpu_data[n].dev ?
cpu_data[n].dev->name : "Unknown" );
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
......@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
cpu_data[n].loops_per_jiffy / (500000 / HZ),
(cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
cpuinfo->loops_per_jiffy / (500000 / HZ),
(cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
......
......@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
/* This sets the vmerge boundary and size, it's here because it has to
* be available on all platforms (zero means no-virtual merging) */
unsigned long parisc_vmerge_boundary = 0;
unsigned long parisc_vmerge_max_size = 0;
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
......@@ -321,7 +316,7 @@ static int __init parisc_init(void)
processor_init();
printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
boot_cpu_data.cpu_count,
num_present_cpus(),
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
......@@ -387,8 +382,8 @@ void start_parisc(void)
if (ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10);
cpu_data[cpunum].fp_rev = coproc_cfg.revision;
cpu_data[cpunum].fp_model = coproc_cfg.model;
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
asm volatile ("fstd %fr0,8(%sp)");
} else {
......
......@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0;
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
#define smp_debug(lvl, ...)
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
DEFINE_SPINLOCK(smp_lock);
volatile struct task_struct *smp_init_current_idle_task;
static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
/* track which CPU is booting */
static volatile int cpu_now_booting __cpuinitdata;
static int parisc_max_cpus __read_mostly = 1;
static int parisc_max_cpus __cpuinitdata = 1;
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
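
Note: the smp_debug() stub gaining do { } while (0) makes the disabled macro expand to exactly one statement, so a call followed by ';' composes safely inside if/else and does not leave a bare empty statement behind. Illustration with an invented macro name:

#define dbg(lvl, ...)	do { } while (0)	/* disabled-debug stub */

int example(int cond)
{
	if (cond)
		dbg(1, "cpu %d booting\n", cond);	/* one full statement, ';' consumed */
	else
		return -1;
	return 0;
}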
......@@ -123,7 +124,7 @@ irqreturn_t
ipi_interrupt(int irq, void *dev_id)
{
int this_cpu = smp_processor_id();
struct cpuinfo_parisc *p = &cpu_data[this_cpu];
struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
unsigned long ops;
unsigned long flags;
......@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
struct cpuinfo_parisc *p = &cpu_data[cpu];
struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
spin_unlock_irqrestore(lock, flags);
}
......@@ -224,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
if (dest_cpu == NO_PROC_ID) {
BUG();
return;
}
BUG_ON(dest_cpu == NO_PROC_ID);
ipi_send(dest_cpu, op);
}
......@@ -309,8 +307,7 @@ smp_cpu_init(int cpunum)
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if(current->mm)
BUG();
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
......@@ -345,6 +342,7 @@ void __init smp_callin(void)
*/
int __cpuinit smp_boot_one_cpu(int cpuid)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
struct task_struct *idle;
long timeout;
......@@ -376,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
smp_init_current_idle_task = idle ;
mb();
printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
......@@ -387,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/*
......@@ -419,12 +417,12 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
return 0;
}
void __devinit smp_prepare_boot_cpu(void)
void __init smp_prepare_boot_cpu(void)
{
int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
/* Setup BSP mappings */
printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
cpu_set(bootstrap_processor, cpu_online_map);
cpu_set(bootstrap_processor, cpu_present_map);
......
......@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
unsigned long cycles_elapsed, ticks_elapsed;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
......@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
cpu_data[cpu].it_value = next_tick;
per_cpu(cpu_data, cpu).it_value = next_tick;
}
struct platform_device rtc_parisc_dev = {
......
......@@ -22,14 +22,14 @@
#include <linux/cpu.h>
#include <linux/cache.h>
static struct cpu cpu_devices[NR_CPUS] __read_mostly;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int num;
for_each_present_cpu(num) {
register_cpu(&cpu_devices[num], num);
register_cpu(&per_cpu(cpu_devices, num), num);
}
return 0;
}
......
......@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs)
/* Fall Through */
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
fixup_exception(regs))
return;
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
......@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs)
int __init check_ivt(void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
extern const u32 os_hpmc_end[];
int i;
u32 check = 0;
......@@ -839,8 +843,7 @@ int __init check_ivt(void *iva)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
length = os_hpmc_end - os_hpmc;
length = os_hpmc_size;
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
......
......@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
......
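
Note: the unwinder can run with spinlocks held or from interrupt context, where GFP_KERNEL could sleep, so the allocation above is switched to GFP_ATOMIC, which never sleeps but can fail (hence the existing NULL check). A sketch of the rule with invented names:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>

static DEFINE_SPINLOCK(my_lock);

static struct pt_regs *clone_regs_atomic(const struct pt_regs *src)
{
	struct pt_regs *copy;
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	copy = kmalloc(sizeof(*copy), GFP_ATOMIC);	/* GFP_KERNEL might sleep here */
	if (copy)
		*copy = *src;
	spin_unlock_irqrestore(&my_lock, flags);
	return copy;
}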
......@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = {
iomem_write32r,
};
const struct iomap_ops *iomap_ops[8] = {
static const struct iomap_ops *iomap_ops[8] = {
[0] = &ioport_ops,
[7] = &iomem_ops
};
......
......@@ -275,7 +275,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
......
......@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst)
}
#endif
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
d = &__get_cpu_var(exception_data);
d->fault_ip = regs->iaoq[0];
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
regs->iaoq[0] = ((fix->fixup) & ~3);
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
* don't want to take the branch, so we don't
* increment iaoq[1], instead we set it to be
* iaoq[0]+4, and clear the B bit in the PSW
*/
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
return 1;
}
return 0;
}
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
struct vm_area_struct *vma, *prev_vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const struct exception_table_entry *fix;
unsigned long acc_type;
int fault;
......@@ -229,32 +257,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
no_context:
if (!user_mode(regs)) {
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
struct exception_data *d;
d = &__get_cpu_var(exception_data);
d->fault_ip = regs->iaoq[0];
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
regs->iaoq[0] = ((fix->fixup) & ~3);
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
* don't want to take the branch, so we don't
* increment iaoq[1], instead we set it to be
* iaoq[0]+4, and clear the B bit in the PSW
*/
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
return;
}
if (!user_mode(regs) && fixup_exception(regs)) {
return;
}
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
......
......@@ -71,8 +71,7 @@ static void asp_choose_irq(struct parisc_device *dev, void *ctrl)
*/
#define ASP_INTERRUPT_ADDR 0xf0800000
int __init
asp_init_chip(struct parisc_device *dev)
static int __init asp_init_chip(struct parisc_device *dev)
{
struct gsc_irq gsc_irq;
int ret;
......
......@@ -555,7 +555,7 @@ static u32 hint_lookup[] = {
* (Load Coherence Index) instruction. The 8 bits used for the virtual
* index are bits 12:19 of the value returned by LCI.
*/
void CCIO_INLINE
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
......@@ -1578,8 +1578,6 @@ static int __init ccio_probe(struct parisc_device *dev)
ioc_count++;
parisc_vmerge_boundary = IOVP_SIZE;
parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
parisc_has_iommu();
return 0;
}
......
......@@ -287,7 +287,7 @@ DINO_PORT_OUT(b, 8, 3)
DINO_PORT_OUT(w, 16, 2)
DINO_PORT_OUT(l, 32, 0)
struct pci_port_ops dino_port_ops = {
static struct pci_port_ops dino_port_ops = {
.inb = dino_in8,
.inw = dino_in16,
.inl = dino_in32,
......@@ -690,7 +690,7 @@ dino_fixup_bus(struct pci_bus *bus)
}
struct pci_bios_ops dino_bios_ops = {
static struct pci_bios_ops dino_bios_ops = {
.init = dino_bios_init,
.fixup_bus = dino_fixup_bus
};
......
......@@ -29,7 +29,7 @@ struct hppb_card {
struct hppb_card *next;
};
struct hppb_card hppb_card_head = {
static struct hppb_card hppb_card_head = {
.hpa = 0,
.next = NULL,
};
......
......@@ -107,7 +107,7 @@ lasi_init_irq(struct gsc_asic *this_lasi)
#else
void __init lasi_led_init(unsigned long lasi_hpa)
static void __init lasi_led_init(unsigned long lasi_hpa)
{
unsigned long datareg;
......@@ -163,8 +163,7 @@ static void lasi_power_off(void)
gsc_writel(0x02, datareg);
}
int __init
lasi_init_chip(struct parisc_device *dev)
static int __init lasi_init_chip(struct parisc_device *dev)
{
extern void (*chassis_power_off)(void);
struct gsc_asic *lasi;
......
......@@ -824,7 +824,7 @@ lba_fixup_bus(struct pci_bus *bus)
}
struct pci_bios_ops lba_bios_ops = {
static struct pci_bios_ops lba_bios_ops = {
.init = lba_bios_init,
.fixup_bus = lba_fixup_bus,
};
......
......@@ -561,7 +561,7 @@ typedef unsigned long space_t;
* IOMMU uses little endian for the pdir.
*/
void SBA_INLINE
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
......@@ -1874,7 +1874,7 @@ static struct parisc_device_id sba_tbl[] = {
{ 0, }
};
int sba_driver_callback(struct parisc_device *);
static int sba_driver_callback(struct parisc_device *);
static struct parisc_driver sba_driver = {
.name = MODULE_NAME,
......@@ -1887,8 +1887,7 @@ static struct parisc_driver sba_driver = {
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
int
sba_driver_callback(struct parisc_device *dev)
static int sba_driver_callback(struct parisc_device *dev)
{
struct sba_device *sba_dev;
u32 func_class;
......@@ -1979,8 +1978,6 @@ sba_driver_callback(struct parisc_device *dev)
proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
#endif
parisc_vmerge_boundary = IOVP_SIZE;
parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
parisc_has_iommu();
return 0;
}
......
......@@ -68,8 +68,7 @@ wax_init_irq(struct gsc_asic *wax)
// gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
}
int __init
wax_init_chip(struct parisc_device *dev)
static int __init wax_init_chip(struct parisc_device *dev)
{
struct gsc_asic *wax;
struct parisc_device *parent;
......
......@@ -34,7 +34,8 @@ static int parisc_get_time(struct device *dev, struct rtc_time *tm)
static int parisc_set_time(struct device *dev, struct rtc_time *tm)
{
struct parisc_rtc *p = dev_get_drvdata(dev);
unsigned long flags, ret;
unsigned long flags;
int ret;
spin_lock_irqsave(&p->lock, flags);
ret = set_rtc_time(tm);
......
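
Note: the rtc-parisc hunk is the "set_time() catch errors" change. set_rtc_time() reports failure as a negative errno; stored in an unsigned long, that value can never compare "< 0", so the error was silently lost. A small user-space demonstration of the pitfall:

#include <stdio.h>

#define MY_EINVAL 22

static int fake_set_rtc_time(void)
{
	return -MY_EINVAL;		/* pretend the firmware call failed */
}

int main(void)
{
	unsigned long uret = fake_set_rtc_time();	/* old type */
	int ret = fake_set_rtc_time();			/* new type */

	/* the first test is always false (compilers can warn that it is) */
	printf("unsigned: error caught? %s\n", uret < 0 ? "yes" : "no");
	printf("signed:   error caught? %s\n", ret < 0 ? "yes" : "no");
	return 0;
}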