Commit 8483ca3c authored by Linus Torvalds

Merge branch 'merge' of master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Make sure __cpu_preinit_ppc970 gets called on 970GX processors
  [POWERPC] Fix CHRP platforms with only 8259
  [POWERPC] IPIC: Fix spinlock recursion in set_irq_handler
  [POWERPC] Fix the UCC rx/tx clock of QE
  [POWERPC] cell: update defconfig
  [POWERPC] spufs: fix another off-by-one bug in spufs_mbox_read
  [POWERPC] spufs: fix signal2 file to report signal2
  [POWERPC] Fix device_is_compatible() const warning
  [POWERPC] Cell timebase bug workaround
  [POWERPC] Support feature fixups in modules
  [POWERPC] Support feature fixups in vdso's
  [POWERPC] Support nested cpu feature sections
  [POWERPC] Consolidate feature fixup code
  [POWERPC] Fix hang in start_ldr if _end or _edata is unaligned
  [POWERPC] Fix spelling errors in ucc_fast.c and ucc_slow.c
  [POWERPC] Don't require execute perms on wrapper when building zImage.initrd
  [POWERPC] Add 970GX cputable entry
  [POWERPC] Fix build breakage with CONFIG_PPC32
  [POWERPC] Fix compiler warning message on get_property call
  [POWERPC] Simplify stolen time calculation
......@@ -115,7 +115,7 @@ endif
quiet_cmd_wrap = WRAP $@
cmd_wrap =$(CONFIG_SHELL) $(wrapper) -c -o $@ -p $2 $(CROSSWRAP) vmlinux
quiet_cmd_wrap_initrd = WRAP $@
cmd_wrap_initrd =$(wrapper) -c -o $@ -p $2 $(CROSSWRAP) \
cmd_wrap_initrd =$(CONFIG_SHELL) $(wrapper) -c -o $@ -p $2 $(CROSSWRAP) \
-i $(obj)/ramdisk.image.gz vmlinux
$(obj)/zImage.chrp: vmlinux $(wrapperbits)
......
......@@ -254,6 +254,7 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_TUNNEL=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
......@@ -275,7 +276,9 @@ CONFIG_INET6_XFRM_TUNNEL=m
CONFIG_INET6_TUNNEL=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=y
CONFIG_INET6_XFRM_MODE_TUNNEL=y
# CONFIG_INET6_XFRM_MODE_BEET is not set
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
# CONFIG_IPV6_SIT is not set
CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_SUBTREES is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
......@@ -405,6 +408,12 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
#
# Misc devices
#
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
#
# ATA/ATAPI/MFM/RLL support
#
......@@ -738,7 +747,6 @@ CONFIG_GEN_RTC=y
# TPM devices
#
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
# I2C support
......@@ -802,6 +810,7 @@ CONFIG_I2C_ALGOBIT=y
#
# Dallas's 1-wire bus
#
# CONFIG_W1 is not set
#
# Hardware Monitoring support
......@@ -809,15 +818,10 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_HWMON is not set
# CONFIG_HWMON_VID is not set
#
# Misc devices
#
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
......@@ -923,6 +927,7 @@ CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
# CONFIG_EXT4DEV_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
......@@ -930,6 +935,7 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
......@@ -1129,6 +1135,7 @@ CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_FORCED_INLINING is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
......
......@@ -18,6 +18,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
......@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
#define PPC_FEATURE_SPE_COMP 0
#endif
struct cpu_spec cpu_specs[] = {
static struct cpu_spec cpu_specs[] = {
#ifdef CONFIG_PPC64
{ /* Power3 */
.pvr_mask = 0xffff0000,
......@@ -227,6 +228,21 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970GX */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00450000,
.cpu_name = "PPC970GX",
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* Power5 GR */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000,
......@@ -1152,3 +1168,71 @@ struct cpu_spec cpu_specs[] = {
#endif /* !CLASSIC_PPC */
#endif /* CONFIG_PPC32 */
};
struct cpu_spec *identify_cpu(unsigned long offset)
{
struct cpu_spec *s = cpu_specs;
struct cpu_spec **cur = &cur_cpu_spec;
unsigned int pvr = mfspr(SPRN_PVR);
int i;
s = PTRRELOC(s);
cur = PTRRELOC(cur);
if (*cur != NULL)
return PTRRELOC(*cur);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
if ((pvr & s->pvr_mask) == s->pvr_value) {
*cur = cpu_specs + i;
#ifdef CONFIG_PPC64
/* ppc64 expects identify_cpu to also call setup_cpu
* for that processor. I will consolidate that at a
* later time, for now, just use our friend #ifdef.
* we also don't need to PTRRELOC the function pointer
* on ppc64 as we are running at 0 in real mode.
*/
if (s->cpu_setup) {
s->cpu_setup(offset, s);
}
#endif /* CONFIG_PPC64 */
return s;
}
BUG();
return NULL;
}
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
struct fixup_entry {
unsigned long mask;
unsigned long value;
long start_off;
long end_off;
} *fcur, *fend;
fcur = fixup_start;
fend = fixup_end;
for (; fcur < fend; fcur++) {
unsigned int *pstart, *pend, *p;
if ((value & fcur->mask) == fcur->value)
continue;
/* These PTRRELOCs will disappear once the new scheme for
* modules and vdso is implemented
*/
pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
for (p = pstart; p < pend; p++) {
*p = 0x60000000u;
asm volatile ("dcbst 0, %0" : : "r" (p));
}
asm volatile ("sync" : : : "memory");
for (p = pstart; p < pend; p++)
asm volatile ("icbi 0,%0" : : "r" (p));
asm volatile ("sync; isync" : : : "memory");
}
}
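As a rough illustration of the consolidated fixup walk above (not part of this commit; the PowerPC cache-maintenance instructions are left out and all names are illustrative), the table-walk logic amounts to:

/* Sketch of the do_feature_fixups() table walk, minus dcbst/icbi/sync.
 * Offsets are stored relative to the table entry itself, as in the kernel.
 */
struct fixup_entry_sketch {
	unsigned long mask;	/* feature bits this section depends on */
	unsigned long value;	/* required value of those bits */
	long start_off;		/* byte offset from this entry to code start */
	long end_off;		/* byte offset from this entry to code end */
};

static void patch_to_nops(unsigned long features,
			  struct fixup_entry_sketch *fcur,
			  struct fixup_entry_sketch *fend)
{
	for (; fcur < fend; fcur++) {
		unsigned int *p, *pstart, *pend;

		/* Section still applies for these features: leave it alone */
		if ((features & fcur->mask) == fcur->value)
			continue;

		pstart = (unsigned int *)fcur + fcur->start_off / 4;
		pend = (unsigned int *)fcur + fcur->end_off / 4;
		for (p = pstart; p < pend; p++)
			*p = 0x60000000u;	/* PowerPC nop */
	}
}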
......@@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
LOAD_REG_IMMEDIATE(r3,cpu_specs)
LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
li r5,0
bl .identify_cpu
LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
......@@ -1646,6 +1641,8 @@ _GLOBAL(__start_initialization_multiplatform)
cmpwi r0,0x3c /* 970FX */
beq 1f
cmpwi r0,0x44 /* 970MP */
beq 1f
cmpwi r0,0x45 /* 970GX */
bne 2f
1: bl .__cpu_preinit_ppc970
2:
......@@ -1964,13 +1961,6 @@ _STATIC(start_here_multiplatform)
addi r2,r2,0x4000
add r2,r2,r26
LOAD_REG_IMMEDIATE(r3, cpu_specs)
add r3,r3,r26
LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
add r4,r4,r26
mr r5,r26
bl .identify_cpu
/* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */
......@@ -2000,13 +1990,6 @@ _STATIC(start_here_common)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
/* Apply the CPUs-specific fixups (nop out sections not relevant
* to this CPU
*/
li r3,0
bl .do_cpu_ftr_fixups
bl .do_fw_ftr_fixups
/* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
......
......@@ -101,80 +101,6 @@ _GLOBAL(reloc_got2)
mtlr r11
blr
/*
* identify_cpu,
* called with r3 = data offset and r4 = CPU number
* doesn't change r3
*/
_GLOBAL(identify_cpu)
addis r8,r3,cpu_specs@ha
addi r8,r8,cpu_specs@l
mfpvr r7
1:
lwz r5,CPU_SPEC_PVR_MASK(r8)
and r5,r5,r7
lwz r6,CPU_SPEC_PVR_VALUE(r8)
cmplw 0,r6,r5
beq 1f
addi r8,r8,CPU_SPEC_ENTRY_SIZE
b 1b
1:
addis r6,r3,cur_cpu_spec@ha
addi r6,r6,cur_cpu_spec@l
sub r8,r8,r3
stw r8,0(r6)
blr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nop's over sections of code that don't apply for this cpu.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
addis r6,r3,cur_cpu_spec@ha
addi r6,r6,cur_cpu_spec@l
lwz r4,0(r6)
add r4,r4,r3
lwz r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
addis r6,r3,__start___ftr_fixup@ha
addi r6,r6,__start___ftr_fixup@l
addis r7,r3,__stop___ftr_fixup@ha
addi r7,r7,__stop___ftr_fixup@l
/* Do the fixup */
1: cmplw 0,r6,r7
bgelr
addi r6,r6,16
lwz r8,-16(r6) /* mask */
and r8,r8,r4
lwz r9,-12(r6) /* value */
cmplw 0,r8,r9
beq 1b
lwz r8,-8(r6) /* section begin */
lwz r9,-4(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
add r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
......
......@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
/*
* identify_cpu and calls setup_cpu
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
*/
_GLOBAL(identify_cpu)
mfpvr r7
1:
lwz r8,CPU_SPEC_PVR_MASK(r3)
and r8,r8,r7
lwz r9,CPU_SPEC_PVR_VALUE(r3)
cmplw 0,r9,r8
beq 1f
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
sub r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
cmpdi 0,r4,0
add r4,r4,r5
beqlr
ld r4,0(r4)
add r4,r4,r5
mtctr r4
/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
mr r4,r3
mr r3,r5
bctr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nop's over sections of code that don't apply for this cpu.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
sub r6,r6,r3
ld r4,0(r6)
sub r4,r4,r3
ld r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
sub r6,r6,r3
LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
bgelr
addi r6,r6,32
ld r8,-32(r6) /* mask */
and r8,r8,r4
ld r9,-24(r6) /* value */
cmpld r8,r9
beq 1b
ld r8,-16(r6) /* section begin */
ld r9,-8(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
sub r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
/*
* do_fw_ftr_fixups - goes through the list of firmware feature fixups
* and writes nop's over sections of code that don't apply for this firmware.
* r3 = data offset (not changed)
*/
_GLOBAL(do_fw_ftr_fixups)
/* Get firmware features */
LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
sub r6,r6,r3
ld r4,0(r6)
/* Get the fixup table */
LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
sub r6,r6,r3
LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
bgelr
addi r6,r6,32
ld r8,-32(r6) /* mask */
and r8,r8,r4
ld r9,-24(r6) /* value */
cmpld r8,r9
beq 1b
ld r8,-16(r6) /* section begin */
ld r9,-8(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
sub r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
BEGIN_FTR_SECTION
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
......
......@@ -24,6 +24,8 @@
#include <linux/kernel.h>
#include <linux/cache.h>
#include "setup.h"
#if 0
#define DEBUGP printk
#else
......@@ -269,33 +271,50 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
char *secstrings;
unsigned int i;
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++)
if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
return &sechdrs[i];
return NULL;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
char *secstrings;
unsigned int i;
const Elf_Shdr *sect;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
me->arch.bug_table = (void *) sechdrs[i].sh_addr;
me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
sect = find_section(hdr, sechdrs, "__bug_table");
if (sect != NULL) {
me->arch.bug_table = (void *) sect->sh_addr;
me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
if (sect != NULL)
do_feature_fixups(cur_cpu_spec->cpu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
return 0;
}
......
......@@ -22,6 +22,9 @@
#include <linux/vmalloc.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include "setup.h"
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
......@@ -400,6 +403,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
| (value & 0x03fffffc);
break;
case R_PPC64_REL64:
/* 64 bits relative (used by features fixups) */
*location = value - (unsigned long)location;
break;
default:
printk("%s: Unknown ADD relocation: %lu\n",
me->name,
......@@ -413,23 +421,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
LIST_HEAD(module_bug_list);
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
char *secstrings;
unsigned int i;
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++)
if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
return &sechdrs[i];
return NULL;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
me->arch.bug_table = (void *) sechdrs[i].sh_addr;
me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
sect = find_section(hdr, sechdrs, "__bug_table");
if (sect != NULL) {
me->arch.bug_table = (void *) sect->sh_addr;
me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
......@@ -439,6 +457,19 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
list_add(&me->arch.bug_list, &module_bug_list);
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
if (sect != NULL)
do_feature_fixups(cur_cpu_spec->cpu_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
if (sect != NULL)
do_feature_fixups(powerpc_firmware_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
return 0;
}
......
......@@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(find_all_nodes);
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
int device_is_compatible(struct device_node *device, const char *compat)
int device_is_compatible(const struct device_node *device, const char *compat)
{
const char* cp;
int cplen, l;
......@@ -1491,7 +1491,8 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
struct property *of_find_property(struct device_node *np, const char *name,
struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
{
struct property *pp;
......@@ -1512,7 +1513,8 @@ struct property *of_find_property(struct device_node *np, const char *name,
* Find a property with a given name for a given node
* and return the value.
*/
const void *get_property(struct device_node *np, const char *name, int *lenp)
const void *get_property(const struct device_node *np, const char *name,
int *lenp)
{
struct property *pp = of_find_property(np,name,lenp);
return pp ? pp->value : NULL;
......
......@@ -91,6 +91,7 @@ int ucache_bsize;
unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
struct cpu_spec *spec;
/* First zero the BSS -- use memset_io, some platforms don't have
* caches on yet */
......@@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
identify_cpu(offset, 0);
do_cpu_ftr_fixups(offset);
spec = identify_cpu(offset);
do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
return KERNELBASE + offset;
}
......
......@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
void __init early_setup(unsigned long dt_ptr)
{
/* Identify CPU type */
identify_cpu(0);
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
setup_paca(0);
......@@ -348,6 +351,14 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
/* Apply the CPUs-specific and firmware specific fixups to kernel
* text (nop out sections not relevant to this CPU or this firmware)
*/
do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
......
......@@ -220,11 +220,8 @@ static void account_process_time(struct pt_regs *regs)
*/
struct cpu_purr_data {
int initialized; /* thread is running */
u64 tb0; /* timebase at origin time */
u64 purr0; /* PURR at origin time */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
u64 stolen; /* stolen time so far */
spinlock_t lock;
};
......@@ -234,10 +231,8 @@ static void snapshot_tb_and_purr(void *data)
{
struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
p->tb0 = mftb();
p->purr0 = mfspr(SPRN_PURR);
p->tb = p->tb0;
p->purr = 0;
p->tb = mftb();
p->purr = mfspr(SPRN_PURR);
wmb();
p->initialized = 1;
}
......@@ -258,37 +253,24 @@ void snapshot_timebases(void)
void calculate_steal_time(void)
{
u64 tb, purr, t0;
u64 tb, purr;
s64 stolen;
struct cpu_purr_data *p0, *pme, *phim;
int cpu;
struct cpu_purr_data *pme;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
cpu = smp_processor_id();
pme = &per_cpu(cpu_purr_data, cpu);
pme = &per_cpu(cpu_purr_data, smp_processor_id());
if (!pme->initialized)
return; /* this can happen in early boot */
p0 = &per_cpu(cpu_purr_data, cpu & ~1);
phim = &per_cpu(cpu_purr_data, cpu ^ 1);
spin_lock(&p0->lock);
spin_lock(&pme->lock);
tb = mftb();
purr = mfspr(SPRN_PURR) - pme->purr0;
if (!phim->initialized || !cpu_online(cpu ^ 1)) {
stolen = (tb - pme->tb) - (purr - pme->purr);
} else {
t0 = pme->tb0;
if (phim->tb0 < t0)
t0 = phim->tb0;
stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
}
if (stolen > 0) {
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
if (stolen > 0)
account_steal_time(current, stolen);
p0->stolen += stolen;
}
pme->tb = tb;
pme->purr = purr;
spin_unlock(&p0->lock);
spin_unlock(&pme->lock);
}
/*
......@@ -297,30 +279,17 @@ void calculate_steal_time(void)
*/
static void snapshot_purr(void)
{
int cpu;
u64 purr;
struct cpu_purr_data *p0, *pme, *phim;
struct cpu_purr_data *pme;
unsigned long flags;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
cpu = smp_processor_id();
pme = &per_cpu(cpu_purr_data, cpu);
p0 = &per_cpu(cpu_purr_data, cpu & ~1);
phim = &per_cpu(cpu_purr_data, cpu ^ 1);
spin_lock_irqsave(&p0->lock, flags);
pme->tb = pme->tb0 = mftb();
purr = mfspr(SPRN_PURR);
if (!phim->initialized) {
pme->purr = 0;
pme->purr0 = purr;
} else {
/* set p->purr and p->purr0 for no change in p0->stolen */
pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
pme->purr0 = purr - pme->purr;
}
pme = &per_cpu(cpu_purr_data, smp_processor_id());
spin_lock_irqsave(&pme->lock, flags);
pme->tb = mftb();
pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
spin_unlock_irqrestore(&p0->lock, flags);
spin_unlock_irqrestore(&pme->lock, flags);
}
#endif /* CONFIG_PPC_SPLPAR */
......
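As a rough illustration of the simplified accounting above (not part of this commit; names are illustrative): the timebase ticks continuously for the partition while PURR only advances when this thread is actually dispatched, so the stolen time since the last snapshot is just the difference of the two deltas.

/* Sketch of the per-thread bookkeeping behind calculate_steal_time() */
struct purr_snapshot {
	unsigned long long tb;		/* last timebase value read */
	unsigned long long purr;	/* last PURR value read */
};

static long long stolen_since(struct purr_snapshot *prev,
			      unsigned long long tb_now,
			      unsigned long long purr_now)
{
	long long stolen = (long long)(tb_now - prev->tb)
			 - (long long)(purr_now - prev->purr);

	prev->tb = tb_now;
	prev->purr = purr_now;
	return stolen > 0 ? stolen : 0;	/* only positive deltas are charged */
}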
......@@ -36,6 +36,8 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include "setup.h"
#undef DEBUG
#ifdef DEBUG
......@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return 0;
}
static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
void *start32;
unsigned long size32;
#ifdef CONFIG_PPC64
void *start64;
unsigned long size64;
start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
if (start64)
do_feature_fixups(cur_cpu_spec->cpu_features,
start64, start64 + size64);
start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
if (start64)
do_feature_fixups(powerpc_firmware_features,
start64, start64 + size64);
#endif /* CONFIG_PPC64 */
start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
if (start32)
do_feature_fixups(cur_cpu_spec->cpu_features,
start32, start32 + size32);
#ifdef CONFIG_PPC64
start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
if (start32)
do_feature_fixups(powerpc_firmware_features,
start32, start32 + size32);
#endif /* CONFIG_PPC64 */
return 0;
}
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
......@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
if (vdso_fixup_datapage(&v32, &v64))
return -1;
if (vdso_fixup_features(&v32, &v64))
return -1;
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
......@@ -714,6 +756,7 @@ void __init vdso_init(void)
* Setup the syscall map in the vDSO
*/
vdso_setup_syscall_map();
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
......
......@@ -32,6 +32,18 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(8);
__ftr_fixup : {
*(__ftr_fixup)
}
#ifdef CONFIG_PPC64
. = ALIGN(8);
__fw_ftr_fixup : {
*(__fw_ftr_fixup)
}
#endif
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
......
......@@ -229,8 +229,10 @@ V_FUNCTION_BEGIN(__do_get_xsec)
xor r0,r8,r8 /* create dependency */
add r3,r3,r0
/* Get TB & offset it */
mftb r7
/* Get TB & offset it. We use the MFTB macro which will generate
* workaround code for Cell.
*/
MFTB(r7)
ld r9,CFG_TB_ORIG_STAMP(r3)
subf r7,r9,r7
......
......@@ -31,6 +31,16 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(8);
__ftr_fixup : {
*(__ftr_fixup)
}
. = ALIGN(8);
__fw_ftr_fixup : {
*(__fw_ftr_fixup)
}
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
......
......@@ -76,7 +76,7 @@ static inline int mmcra_must_set_sample(void)
{
if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
__is_processor(PV_970) || __is_processor(PV_970FX) ||
__is_processor(PV_970MP))
__is_processor(PV_970MP) || __is_processor(PV_970GX))
return 1;
return 0;
......
......@@ -385,7 +385,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
udata = (void __user *)buf;
spu_acquire(ctx);
for (count = 0; count <= len; count += 4, udata++) {
for (count = 0; (count + 4) <= len; count += 4, udata++) {
int ret;
ret = ctx->ops->mbox_read(ctx, &mbox_data);
if (ret == 0)
......
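As a rough illustration of the off-by-one fixed above (not part of this commit), the old test still permits a 4-byte store when count has already reached len, i.e. one word past what the caller asked for:

/* Standalone demonstration of the loop-bound difference */
#include <stdio.h>

int main(void)
{
	unsigned int len = 8, count, copied;

	for (copied = 0, count = 0; count <= len; count += 4)
		copied += 4;		/* old bound: 12 bytes into an 8-byte buffer */
	printf("old bound copies %u bytes for len=%u\n", copied, len);

	for (copied = 0, count = 0; (count + 4) <= len; count += 4)
		copied += 4;		/* new bound: exactly 8 bytes */
	printf("new bound copies %u bytes for len=%u\n", copied, len);
	return 0;
}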
......@@ -147,7 +147,7 @@ static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
static u32 spu_hw_signal2_read(struct spu_context *ctx)
{
return in_be32(&ctx->spu->problem->signal_notify1);
return in_be32(&ctx->spu->problem->signal_notify2);
}
static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
......
......@@ -477,8 +477,10 @@ static void __init chrp_find_8259(void)
" address, polling\n");
i8259_init(pic, chrp_int_ack);
if (ppc_md.get_irq == NULL)
if (ppc_md.get_irq == NULL) {
ppc_md.get_irq = i8259_irq;
irq_set_default_host(i8259_get_host());
}
if (chrp_mpic != NULL) {
cascade_irq = irq_of_parse_and_map(pic, 0);
if (cascade_irq == NO_IRQ)
......
......@@ -694,6 +694,11 @@ void * __init iSeries_early_setup(void)
{
unsigned long phys_mem_size;
/* Identify CPU type. This is done again by the common code later
* on but calling this function multiple times is fine.
*/
identify_cpu(0);
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
......
......@@ -45,7 +45,8 @@
.section .text
.align 5
#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) || \
(defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32))
/* This gets called by via-pmu.c late during the sleep process.
* The PMU was already sent the sleep command and will shut us down
......
......@@ -224,6 +224,11 @@ static struct irq_host_ops i8259_host_ops = {
.xlate = i8259_host_xlate,
};
struct irq_host *i8259_get_host(void)
{
return i8259_host;
}
/**
* i8259_init - Initialize the legacy controller
* @node: device node of the legacy PIC (can be NULL, but then, it will match
......
......@@ -473,9 +473,9 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
desc->status |= IRQ_LEVEL;
set_irq_handler(virq, handle_level_irq);
desc->handle_irq = handle_level_irq;
} else {
set_irq_handler(virq, handle_edge_irq);
desc->handle_irq = handle_edge_irq;
}
/* only EXT IRQ senses are programmable on ipic
......
......@@ -207,6 +207,7 @@ int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode)
case QE_CLK18: source = 8; break;
case QE_CLK7: source = 9; break;
case QE_CLK8: source = 10; break;
case QE_CLK16: source = 11; break;
default: source = -1; break;
}
break;
......@@ -222,6 +223,7 @@ int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode)
case QE_CLK22: source = 8; break;
case QE_CLK7: source = 9; break;
case QE_CLK8: source = 10; break;
case QE_CLK16: source = 11; break;
default: source = -1; break;
}
break;
......
......@@ -163,7 +163,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* check if the UCC port number is in range. */
if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
uccf_err("ucc_fast_init: Illagal UCC number!");
uccf_err("ucc_fast_init: Illegal UCC number!");
return -EINVAL;
}
......
......@@ -152,7 +152,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* check if the UCC port number is in range. */
if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
uccs_err("ucc_slow_init: Illagal UCC number!");
uccs_err("ucc_slow_init: Illegal UCC number!");
return -EINVAL;
}
......
......@@ -48,7 +48,7 @@ phys_addr_t get_csrbase(void)
tsi = of_find_node_by_type(NULL, "tsi-bridge");
if (tsi) {
unsigned int size;
void *prop = get_property(tsi, "reg", &size);
const void *prop = get_property(tsi, "reg", &size);
tsi108_csr_base = of_translate_address(tsi, prop);
of_node_put(tsi);
};
......@@ -79,7 +79,7 @@ static int __init tsi108_eth_of_init(void)
hw_info tsi_eth_data;
unsigned int *id;
unsigned int *phy_id;
void *mac_addr;
const void *mac_addr;
phandle *ph;
memset(r, 0, sizeof(r));
......
......@@ -154,8 +154,8 @@ do_relocate_out:
start_ldr:
/* Clear all of BSS and set up stack for C calls */
lis r3,edata@h
ori r3,r3,edata@l
lis r3,__bss_start@h
ori r3,r3,__bss_start@l
lis r4,end@h
ori r4,r4,end@l
subi r3,r3,4
......@@ -163,7 +163,7 @@ start_ldr:
li r0,0
50: stwu r0,4(r3)
cmpw cr0,r3,r4
bne 50b
blt 50b
90: mr r9,r1 /* Save old stack pointer (in case it matters) */
lis r1,.stack@h
ori r1,r1,.stack@l
......
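As a rough illustration of the hang fixed above (not part of this commit; raw addresses are used only to mirror the assembly): the loader zeroes memory a word at a time, so if the end address is not a multiple of four an exact-match exit test (bne) is never satisfied, while a less-than test (blt) always terminates.

/* Sketch of the word-clearing loop with a "blt"-style exit test */
static void clear_words(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += 4)	/* "p != end" could spin forever */
		*(unsigned int *)p = 0;
}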
......@@ -109,80 +109,6 @@ _GLOBAL(reloc_got2)
mtlr r11
blr
/*
* identify_cpu,
* called with r3 = data offset and r4 = CPU number
* doesn't change r3
*/
_GLOBAL(identify_cpu)
addis r8,r3,cpu_specs@ha
addi r8,r8,cpu_specs@l
mfpvr r7
1:
lwz r5,CPU_SPEC_PVR_MASK(r8)
and r5,r5,r7
lwz r6,CPU_SPEC_PVR_VALUE(r8)
cmplw 0,r6,r5
beq 1f
addi r8,r8,CPU_SPEC_ENTRY_SIZE
b 1b
1:
addis r6,r3,cur_cpu_spec@ha
addi r6,r6,cur_cpu_spec@l
sub r8,r8,r3
stw r8,0(r6)
blr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nop's over sections of code that don't apply for this cpu.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
addis r6,r3,cur_cpu_spec@ha
addi r6,r6,cur_cpu_spec@l
lwz r4,0(r6)
add r4,r4,r3
lwz r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
addis r6,r3,__start___ftr_fixup@ha
addi r6,r6,__start___ftr_fixup@l
addis r7,r3,__stop___ftr_fixup@ha
addi r7,r7,__stop___ftr_fixup@l
/* Do the fixup */
1: cmplw 0,r6,r7
bgelr
addi r6,r6,16
lwz r8,-16(r6) /* mask */
and r8,r8,r4
lwz r9,-12(r6) /* value */
cmplw 0,r8,r9
beq 1b
lwz r8,-8(r6) /* section begin */
lwz r9,-4(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
add r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
......
......@@ -38,6 +38,7 @@
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/ocp.h>
#include <asm/prom.h>
#define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
......@@ -53,8 +54,6 @@
extern void platform_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
extern void identify_cpu(unsigned long offset, unsigned long cpu);
extern void do_cpu_ftr_fixups(unsigned long offset);
extern void reloc_got2(unsigned long offset);
extern void ppc6xx_idle(void);
......@@ -301,6 +300,7 @@ early_init(int r3, int r4, int r5)
{
unsigned long phys;
unsigned long offset = reloc_offset();
struct cpu_spec *spec;
/* Default */
phys = offset + KERNELBASE;
......@@ -313,8 +313,10 @@ early_init(int r3, int r4, int r5)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
identify_cpu(offset, 0);
do_cpu_ftr_fixups(offset);
spec = identify_cpu(offset);
do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
return phys;
}
......
......@@ -14,6 +14,58 @@
# define ASM_CONST(x) __ASM_CONST(x)
#endif
/*
* Feature section common macros
*
* Note that the entries now contain offsets between the table entry
* and the code rather than absolute code pointers in order to be
* useable with the vdso shared library. There is also an assumption
* that values will be negative, that is, the fixup table has to be
* located after the code it fixes up.
*/
#ifdef CONFIG_PPC64
#ifdef __powerpc64__
/* 64 bits kernel, 64 bits code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
99: \
.section sect,"a"; \
.align 3; \
98: \
.llong msk; \
.llong val; \
.llong label##b-98b; \
.llong 99b-98b; \
.previous
#else /* __powerpc64__ */
/* 64 bits kernel, 32 bits code (ie. vdso32) */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
99: \
.section sect,"a"; \
.align 3; \
98: \
.llong msk; \
.llong val; \
.long 0xffffffff; \
.long label##b-98b; \
.long 0xffffffff; \
.long 99b-98b; \
.previous
#endif /* !__powerpc64__ */
#else /* CONFIG_PPC64 */
/* 32 bits kernel, 32 bits code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
99: \
.section sect,"a"; \
.align 2; \
98: \
.long msk; \
.long val; \
.long label##b-98b; \
.long 99b-98b; \
.previous
#endif /* !CONFIG_PPC64 */
#ifdef __powerpc64__
/* operations for longs and pointers */
......
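For reference (an illustrative C view, not kernel source), each 64-bit MAKE_FTR_SECTION_ENTRY above lays down one record with the same shape as the fixup_entry structure that do_feature_fixups() walks; storing offsets relative to the record itself (label 98:) rather than absolute addresses is what lets the same mechanism work for the vdso and for modules.

/* Shape of one 64-bit fixup record, field by field */
struct ftr_fixup_record64 {
	unsigned long long mask;	/* .llong msk            */
	unsigned long long value;	/* .llong val            */
	long long start_off;		/* .llong label##b - 98b */
	long long end_off;		/* .llong 99b  - 98b     */
};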
......@@ -89,8 +89,11 @@ struct cpu_spec {
extern struct cpu_spec *cur_cpu_spec;
extern void identify_cpu(unsigned long offset, unsigned long cpu);
extern void do_cpu_ftr_fixups(unsigned long offset);
extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
extern struct cpu_spec *identify_cpu(unsigned long offset);
extern void do_feature_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
#endif /* __ASSEMBLY__ */
......@@ -144,6 +147,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
#ifndef __ASSEMBLY__
......@@ -332,7 +336,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
......@@ -431,29 +435,12 @@ static inline int cpu_has_feature(unsigned long feature)
#ifdef __ASSEMBLY__
#define BEGIN_FTR_SECTION 98:
#ifndef __powerpc64__
#define BEGIN_FTR_SECTION_NESTED(label) label:
#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97)
#define END_FTR_SECTION_NESTED(msk, val, label) \
MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
#define END_FTR_SECTION(msk, val) \
99: \
.section __ftr_fixup,"a"; \
.align 2; \
.long msk; \
.long val; \
.long 98b; \
.long 99b; \
.previous
#else /* __powerpc64__ */
#define END_FTR_SECTION(msk, val) \
99: \
.section __ftr_fixup,"a"; \
.align 3; \
.llong msk; \
.llong val; \
.llong 98b; \
.llong 99b; \
.previous
#endif /* __powerpc64__ */
END_FTR_SECTION_NESTED(msk, val, 97)
#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
......
......@@ -96,19 +96,16 @@ extern void machine_check_fwnmi(void);
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
#else /* __ASSEMBLY__ */
extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
#define BEGIN_FW_FTR_SECTION 96:
#else /* __ASSEMBLY__ */
#define BEGIN_FW_FTR_SECTION_NESTED(label) label:
#define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97)
#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
#define END_FW_FTR_SECTION(msk, val) \
97: \
.section __fw_ftr_fixup,"a"; \
.align 3; \
.llong msk; \
.llong val; \
.llong 96b; \
.llong 97b; \
.previous
END_FW_FTR_SECTION_NESTED(msk, val, 97)
#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk))
#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0)
......
......@@ -7,6 +7,7 @@
#ifdef CONFIG_PPC_MERGE
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
extern struct irq_host *i8259_get_host(void);
#else
extern void i8259_init(unsigned long intack_addr, int offset);
extern int i8259_irq(void);
......
......@@ -30,9 +30,9 @@ BEGIN_FTR_SECTION; \
mfspr ra,SPRN_PURR; /* get processor util. reg */ \
END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
BEGIN_FTR_SECTION; \
mftb ra; /* or get TB if no PURR */ \
MFTB(ra); /* or get TB if no PURR */ \
END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
ld rb,PACA_STARTPURR(r13); \
ld rb,PACA_STARTPURR(r13); \
std ra,PACA_STARTPURR(r13); \
subf rb,rb,ra; /* subtract start value */ \
ld ra,PACA_USER_TIME(r13); \
......@@ -45,9 +45,9 @@ BEGIN_FTR_SECTION; \
mfspr ra,SPRN_PURR; /* get processor util. reg */ \
END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
BEGIN_FTR_SECTION; \
mftb ra; /* or get TB if no PURR */ \
MFTB(ra); /* or get TB if no PURR */ \
END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
ld rb,PACA_STARTPURR(r13); \
ld rb,PACA_STARTPURR(r13); \
std ra,PACA_STARTPURR(r13); \
subf rb,rb,ra; /* subtract start value */ \
ld ra,PACA_SYSTEM_TIME(r13); \
......@@ -274,6 +274,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601
#endif
#ifdef CONFIG_PPC_CELL
#define MFTB(dest) \
90: mftb dest; \
BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
#define MFTB(dest) mftb dest
#endif
#ifndef CONFIG_SMP
#define TLBSYNC
......
......@@ -134,7 +134,7 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
extern struct property *of_find_property(struct device_node *np,
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
extern struct device_node *of_node_get(struct device_node *node);
......@@ -158,10 +158,12 @@ extern void of_detach_node(const struct device_node *);
extern void finish_device_tree(void);
extern void unflatten_device_tree(void);
extern void early_init_devtree(void *);
extern int device_is_compatible(struct device_node *device, const char *);
extern int device_is_compatible(const struct device_node *device,
const char *);
extern int machine_is_compatible(const char *compat);
extern const void *get_property(struct device_node *node, const char *name,
int *lenp);
extern const void *get_property(const struct device_node *node,
const char *name,
int *lenp);
extern void print_properties(struct device_node *node);
extern int prom_n_addr_cells(struct device_node* np);
extern int prom_n_size_cells(struct device_node* np);
......
......@@ -591,6 +591,7 @@
#define PV_630 0x0040
#define PV_630p 0x0041
#define PV_970MP 0x0044
#define PV_970GX 0x0045
#define PV_BE 0x0070
#define PV_PA6T 0x0090
......@@ -618,10 +619,35 @@
: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
#ifdef __powerpc64__
#ifdef CONFIG_PPC_CELL
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mftb %0;\n" \
"97: cmpwi %0,0;\n" \
" beq- 90b;\n" \
"99:\n" \
".section __ftr_fixup,\"a\"\n" \
".align 3\n" \
"98:\n" \
" .llong %1\n" \
" .llong %1\n" \
" .llong 97b-98b\n" \
" .llong 99b-98b\n" \
".previous" \
: "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
#else
#define mftb() ({unsigned long rval; \
asm volatile("mftb %0" : "=r" (rval)); rval;})
#endif /* !CONFIG_PPC_CELL */
#else /* __powerpc64__ */
#define mftbl() ({unsigned long rval; \
asm volatile("mftbl %0" : "=r" (rval)); rval;})
#define mftbu() ({unsigned long rval; \
asm volatile("mftbu %0" : "=r" (rval)); rval;})
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
......
......@@ -82,30 +82,35 @@ struct div_result {
#define __USE_RTC() 0
#endif
/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
#ifdef CONFIG_PPC64
/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
#define get_tbl get_tb
#else
static inline unsigned long get_tbl(void)
{
unsigned long tbl;
#if defined(CONFIG_403GCX)
unsigned long tbl;
asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
return tbl;
#else
asm volatile("mftb %0" : "=r" (tbl));
return mftbl();
#endif
return tbl;
}
static inline unsigned int get_tbu(void)
{
#ifdef CONFIG_403GCX
unsigned int tbu;
#if defined(CONFIG_403GCX)
asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
return tbu;
#else
asm volatile("mftbu %0" : "=r" (tbu));
return mftbu();
#endif
return tbu;
}
#endif /* !CONFIG_PPC64 */
static inline unsigned int get_rtcl(void)
{
......@@ -131,7 +136,7 @@ static inline u64 get_tb(void)
{
return mftb();
}
#else
#else /* CONFIG_PPC64 */
static inline u64 get_tb(void)
{
unsigned int tbhi, tblo, tbhi2;
......@@ -144,7 +149,7 @@ static inline u64 get_tb(void)
return ((u64)tbhi << 32) | tblo;
}
#endif
#endif /* !CONFIG_PPC64 */
static inline void set_tb(unsigned int upper, unsigned int lower)
{
......
......@@ -8,6 +8,7 @@
*/
#include <asm/cputable.h>
#include <asm/reg.h>
#define CLOCK_TICK_RATE 1024000 /* Underlying HZ */
......@@ -15,13 +16,11 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
cycles_t ret;
#ifdef __powerpc64__
__asm__ __volatile__("mftb %0" : "=r" (ret) : );
return mftb();
#else
cycles_t ret;
/*
* For the "cycle" counter we use the timebase lower half.
* Currently only used on SMP.
......@@ -30,18 +29,19 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
"98: mftb %0\n"
"97: mftb %0\n"
"99:\n"
".section __ftr_fixup,\"a\"\n"
".align 2\n"
"98:\n"
" .long %1\n"
" .long 0\n"
" .long 98b\n"
" .long 99b\n"
" .long 97b-98b\n"
" .long 99b-98b\n"
".previous"
: "=r" (ret) : "i" (CPU_FTR_601));
#endif
return ret;
#endif
}
#endif /* __KERNEL__ */
......
......@@ -921,6 +921,8 @@ static int init_section_ref_ok(const char *name)
".fixup",
".smp_locks",
".plt", /* seen on ARCH=um build on x86_64. Harmless */
"__ftr_fixup", /* powerpc cpu feature fixup */
"__fw_ftr_fixup", /* powerpc firmware feature fixup */
NULL
};
/* Start of section names */
......