Commit 9bf9b2f3 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (53 commits)
  powerpc: Support for relocatable kdump kernel
  powerpc: Don't use a 16G page if beyond mem= limits
  powerpc: Add del_node() for early boot code to prune inapplicable devices.
  powerpc: Further compile fixup for STRICT_MM_TYPECHECKS
  powerpc: Remove empty #else from signal_64.c
  powerpc: Move memory size print into common show_cpuinfo for 32-bit
  hvc_console: Remove __devexit annotation of hvc_remove()
  hvc_console: Add support for tty window resizing
  hvc_console: Fix loop if put_char() returns 0
  hvc_console: Add tty driver flag TTY_DRIVER_RESET_TERMIOS
  hvc_console: Add a hangup notifier for backends
  powerpc/83xx: Add DS1339 RTC support for MPC8349E-mITX boards .dts
  powerpc/83xx: Add support for MCU microcontroller in .dts files
  powerpc/85xx: Move mpc8572ds.dts to address-cells/size-cells = <2>
  of/spi: Support specifying chip select as active high via device tree
  powerpc: Remove device_type = "board_control" properties in .dts files
  i2c-cpm: Suppress autoprobing for devices
  powerpc/85xx: Fix mpc8536ds dma interrupt numbers
  powerpc/85xx: Enable enhanced functions for 8536 TSEC
  powerpc: Delete unused prom_strtoul and prom_memparse
  ...
......@@ -109,7 +109,8 @@ There are two possible methods of using Kdump.
2) Or use the system kernel binary itself as dump-capture kernel and there is
no need to build a separate dump-capture kernel. This is possible
only with the architectures which support a relocatable kernel. As
of today, i386, x86_64 and ia64 architectures support relocatable kernel.
of today, i386, x86_64, ppc64 and ia64 architectures support relocatable
kernel.
Building a relocatable kernel is advantageous from the point of view that
one does not have to build a second kernel for capturing the dump. But
......@@ -207,8 +208,15 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64)
Dump-capture kernel config options (Arch Dependent, ppc64)
----------------------------------------------------------
* Make and install the kernel and its modules. DO NOT add this kernel
to the boot loader configuration files.
1) Enable "Build a kdump crash kernel" support under "Kernel" options:
CONFIG_CRASH_DUMP=y
2) Enable "Build a relocatable kernel" support
CONFIG_RELOCATABLE=y
Make and install the kernel and its modules.
Dump-capture kernel config options (Arch Dependent, ia64)
----------------------------------------------------------
......
......@@ -1917,6 +1917,8 @@ platforms are moved over to use the flattened-device-tree model.
inverse clock polarity (CPOL) mode
- spi-cpha - (optional) Empty property indicating device requires
shifted clock phase (CPHA) mode
- spi-cs-high - (optional) Empty property indicating device requires
chip select active high
SPI example for an MPC5200 SPI bus:
spi@f00 {
......
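As a reading aid, a hedged C sketch of how the kernel's OF/SPI setup can honour the new property; of_find_property() and SPI_CS_HIGH are existing kernel symbols, while the helper name below is illustrative rather than the actual of_spi code.

    #include <linux/of.h>
    #include <linux/spi/spi.h>

    /* Illustrative only: map the empty "spi-cs-high" property to the mode flag. */
    static void example_apply_cs_high(struct spi_device *spi, struct device_node *nc)
    {
            if (of_find_property(nc, "spi-cs-high", NULL))
                    spi->mode |= SPI_CS_HIGH;
    }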
......@@ -2,13 +2,13 @@
Required properties:
- device_type : Should be "board-control"
- compatible : Should be "fsl,<board>-bcsr"
- reg : Offset and length of the register set for the device
Example:
bcsr@f8000000 {
device_type = "board-control";
compatible = "fsl,mpc8360mds-bcsr";
reg = <f8000000 8000>;
};
......
......@@ -19,9 +19,6 @@ config WORD_SIZE
default 64 if PPC64
default 32 if !PPC64
config PPC_MERGE
def_bool y
config ARCH_PHYS_ADDR_T_64BIT
def_bool PPC64 || PHYS_64BIT
......@@ -326,13 +323,11 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC_MULTIPLATFORM && PPC64
depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
help
Build a kernel suitable for use as a kdump capture kernel.
The kernel will be linked at a different address than normal, and
so can only be used for Kdump.
Don't change this unless you know what you are doing.
The same kernel binary can be used as production kernel and dump
capture kernel.
config PHYP_DUMP
bool "Hypervisor-assisted dump (EXPERIMENTAL)"
......@@ -832,11 +827,9 @@ config PAGE_OFFSET
default "0xc000000000000000"
config KERNEL_START
hex
default "0xc000000002000000" if CRASH_DUMP
default "0xc000000000000000"
config PHYSICAL_START
hex
default "0x02000000" if CRASH_DUMP
default "0x00000000"
endif
......
......@@ -68,7 +68,8 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \
cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
cuboot-acadia.c
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
......@@ -211,6 +212,7 @@ image-$(CONFIG_DEFAULT_UIMAGE) += uImage
# Board ports in arch/powerpc/platform/40x/Kconfig
image-$(CONFIG_EP405) += dtbImage.ep405
image-$(CONFIG_WALNUT) += treeImage.walnut
image-$(CONFIG_ACADIA) += cuImage.acadia
# Board ports in arch/powerpc/platform/44x/Kconfig
image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony
......@@ -319,6 +321,9 @@ $(obj)/zImage.iseries: vmlinux
$(obj)/uImage: vmlinux $(wrapperbits)
$(call if_changed,wrap,uboot)
$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
......
......@@ -11,7 +11,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Usage: addnote zImage [note.elf]
* Usage: addnote [-r realbase] zImage [note.elf]
*
* If note.elf is supplied, it is the name of an ELF file that contains
* an RPA note to use instead of the built-in one. Alternatively, the
......@@ -153,18 +153,31 @@ unsigned char *read_rpanote(const char *fname, int *nnp)
int
main(int ac, char **av)
{
int fd, n, i;
int fd, n, i, ai;
int ph, ps, np;
int nnote, nnote2, ns;
unsigned char *rpap;
if (ac != 2 && ac != 3) {
fprintf(stderr, "Usage: %s elf-file [rpanote.elf]\n", av[0]);
char *p, *endp;
ai = 1;
if (ac >= ai + 2 && strcmp(av[ai], "-r") == 0) {
/* process -r realbase */
p = av[ai + 1];
descr[1] = strtol(p, &endp, 16);
if (endp == p || *endp != 0) {
fprintf(stderr, "Can't parse -r argument '%s' as hex\n",
p);
exit(1);
}
ai += 2;
}
if (ac != ai + 1 && ac != ai + 2) {
fprintf(stderr, "Usage: %s [-r realbase] elf-file [rpanote.elf]\n", av[0]);
exit(1);
}
fd = open(av[1], O_RDWR);
fd = open(av[ai], O_RDWR);
if (fd < 0) {
perror(av[1]);
perror(av[ai]);
exit(1);
}
......@@ -184,12 +197,12 @@ main(int ac, char **av)
if (buf[E_IDENT+EI_CLASS] != ELFCLASS32
|| buf[E_IDENT+EI_DATA] != ELFDATA2MSB) {
fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n",
av[1]);
av[ai]);
exit(1);
}
if (ac == 3)
rpap = read_rpanote(av[2], &nnote2);
if (ac == ai + 2)
rpap = read_rpanote(av[ai + 1], &nnote2);
ph = GET_32BE(buf, E_PHOFF);
ps = GET_16BE(buf, E_PHENTSIZE);
......@@ -202,7 +215,7 @@ main(int ac, char **av)
for (i = 0; i < np; ++i) {
if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) {
fprintf(stderr, "%s already has a note entry\n",
av[1]);
av[ai]);
exit(0);
}
ph += ps;
......@@ -260,18 +273,18 @@ main(int ac, char **av)
exit(1);
}
if (i < n) {
fprintf(stderr, "%s: write truncated\n", av[1]);
fprintf(stderr, "%s: write truncated\n", av[ai]);
exit(1);
}
exit(0);
notelf:
fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]);
fprintf(stderr, "%s does not appear to be an ELF file\n", av[ai]);
exit(1);
nospace:
fprintf(stderr, "sorry, I can't find space in %s to put the note\n",
av[1]);
av[ai]);
exit(1);
}
......@@ -37,6 +37,10 @@ static void platform_fixups(void)
* this can do a simple path lookup.
*/
soc = find_node_by_devtype(NULL, "soc");
if (!soc)
soc = find_node_by_compatible(NULL, "fsl,mpc5200-immr");
if (!soc)
soc = find_node_by_compatible(NULL, "fsl,mpc5200b-immr");
if (soc) {
setprop(soc, "bus-frequency", &bd.bi_ipbfreq,
sizeof(bd.bi_ipbfreq));
......
/*
* Old U-boot compatibility for Acadia
*
* Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* Copyright 2008 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "io.h"
#include "dcr.h"
#include "stdio.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"
#define TARGET_4xx
#include "ppcboot.h"
static bd_t bd;
#define CPR_PERD0_SPIDV_MASK 0x000F0000 /* SPI Clock Divider */
#define PLLC_SRC_MASK 0x20000000 /* PLL feedback source */
#define PLLD_FBDV_MASK 0x1F000000 /* PLL feedback divider value */
#define PLLD_FWDVA_MASK 0x000F0000 /* PLL forward divider A value */
#define PLLD_FWDVB_MASK 0x00000700 /* PLL forward divider B value */
#define PRIMAD_CPUDV_MASK 0x0F000000 /* CPU Clock Divisor Mask */
#define PRIMAD_PLBDV_MASK 0x000F0000 /* PLB Clock Divisor Mask */
#define PRIMAD_OPBDV_MASK 0x00000F00 /* OPB Clock Divisor Mask */
#define PRIMAD_EBCDV_MASK 0x0000000F /* EBC Clock Divisor Mask */
#define PERD0_PWMDV_MASK 0xFF000000 /* PWM Divider Mask */
#define PERD0_SPIDV_MASK 0x000F0000 /* SPI Divider Mask */
#define PERD0_U0DV_MASK 0x0000FF00 /* UART 0 Divider Mask */
#define PERD0_U1DV_MASK 0x000000FF /* UART 1 Divider Mask */
static void get_clocks(void)
{
unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i;
unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv;
unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB;
unsigned long div; /* total divisor udiv * bdiv */
unsigned long umin; /* minimum udiv */
unsigned short diff; /* smallest diff */
unsigned long udiv; /* best udiv */
unsigned short idiff; /* current diff */
unsigned short ibdiv; /* current bdiv */
unsigned long est; /* current estimate */
unsigned long baud;
void *np;
/* read the sysclk value from the CPLD */
sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000;
/*
* Read PLL Mode registers
*/
cpr_plld = CPR0_READ(DCRN_CPR0_PLLD);
cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC);
/*
* Determine forward divider A
*/
pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16);
/*
* Determine forward divider B
*/
pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8);
if (pllFwdDivB == 0)
pllFwdDivB = 8;
/*
* Determine FBK_DIV.
*/
pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24);
if (pllFbkDiv == 0)
pllFbkDiv = 256;
/*
* Read CPR_PRIMAD register
*/
cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD);
/*
* Determine PLB_DIV.
*/
pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16);
if (pllPlbDiv == 0)
pllPlbDiv = 16;
/*
* Determine EXTBUS_DIV.
*/
pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK);
if (pllExtBusDiv == 0)
pllExtBusDiv = 16;
/*
* Determine OPB_DIV.
*/
pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8);
if (pllOpbDiv == 0)
pllOpbDiv = 16;
/* There is a bug in U-Boot that prevents us from using
* bd.bi_opbfreq because U-Boot doesn't populate it for
* 405EZ. We get to calculate it, yay!
*/
freqOPB = (sysclk * pllFbkDiv) / pllOpbDiv;
freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv;
plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ?
pllFwdDivB : pllFwdDiv) *
pllFbkDiv) / pllFwdDivB);
np = find_node_by_alias("serial0");
if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud))
fatal("no current-speed property\n\r");
udiv = 256; /* Assume lowest possible serial clk */
div = plloutb / (16 * baud); /* total divisor */
umin = (plloutb / freqOPB) << 1; /* 2 x OPB divisor */
diff = 256; /* highest possible */
/* i is the test udiv value -- start with the largest
* possible (256) to minimize serial clock and constrain
* search to umin.
*/
for (i = 256; i > umin; i--) {
ibdiv = div / i;
est = i * ibdiv;
idiff = (est > div) ? (est-div) : (div-est);
if (idiff == 0) {
udiv = i;
break; /* can't do better */
} else if (idiff < diff) {
udiv = i; /* best so far */
diff = idiff; /* update lowest diff*/
}
}
freqUART = plloutb / udiv;
dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq);
dt_fixup_clock("/plb/ebc", freqEBC);
dt_fixup_clock("/plb/opb", freqOPB);
dt_fixup_clock("/plb/opb/serial@ef600300", freqUART);
dt_fixup_clock("/plb/opb/serial@ef600400", freqUART);
}
static void acadia_fixups(void)
{
dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
get_clocks();
dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
}
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
CUBOOT_INIT();
platform_ops.fixups = acadia_fixups;
platform_ops.exit = ibm40x_dbcr_reset;
fdt_init(_dtb_start);
serial_console_init();
}
/*
* Device Tree Source for AMCC Acadia (405EZ)
*
* Copyright IBM Corp. 2008
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
model = "amcc,acadia";
compatible = "amcc,acadia";
dcr-parent = <&{/cpus/cpu@0}>;
aliases {
ethernet0 = &EMAC0;
serial0 = &UART0;
serial1 = &UART1;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "PowerPC,405EZ";
reg = <0x0>;
clock-frequency = <0>; /* Filled in by wrapper */
timebase-frequency = <0>; /* Filled in by wrapper */
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <16384>;
d-cache-size = <16384>;
dcr-controller;
dcr-access-method = "native";
};
};
memory {
device_type = "memory";
reg = <0x0 0x0>; /* Filled in by wrapper */
};
UIC0: interrupt-controller {
compatible = "ibm,uic-405ez", "ibm,uic";
interrupt-controller;
dcr-reg = <0x0c0 0x009>;
cell-index = <0>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
};
plb {
compatible = "ibm,plb-405ez", "ibm,plb3";
#address-cells = <1>;
#size-cells = <1>;
ranges;
clock-frequency = <0>; /* Filled in by wrapper */
MAL0: mcmal {
compatible = "ibm,mcmal-405ez", "ibm,mcmal";
dcr-reg = <0x380 0x62>;
num-tx-chans = <1>;
num-rx-chans = <1>;
interrupt-parent = <&UIC0>;
/* 405EZ has only 3 interrupts to the UIC, as
* SERR, TXDE, and RXDE are or'd together into
* one UIC bit
*/
interrupts = <
0x13 0x4 /* TXEOB */
0x15 0x4 /* RXEOB */
0x12 0x4 /* SERR, TXDE, RXDE */>;
};
POB0: opb {
compatible = "ibm,opb-405ez", "ibm,opb";
#address-cells = <1>;
#size-cells = <1>;
ranges;
dcr-reg = <0x0a 0x05>;
clock-frequency = <0>; /* Filled in by wrapper */
UART0: serial@ef600300 {
device_type = "serial";
compatible = "ns16550";
reg = <0xef600300 0x8>;
virtual-reg = <0xef600300>;
clock-frequency = <0>; /* Filled in by wrapper */
current-speed = <115200>;
interrupt-parent = <&UIC0>;
interrupts = <0x5 0x4>;
};
UART1: serial@ef600400 {
device_type = "serial";
compatible = "ns16550";
reg = <0xef600400 0x8>;
clock-frequency = <0>; /* Filled in by wrapper */
current-speed = <115200>;
interrupt-parent = <&UIC0>;
interrupts = <0x6 0x4>;
};
IIC: i2c@ef600500 {
compatible = "ibm,iic-405ez", "ibm,iic";
reg = <0xef600500 0x11>;
interrupt-parent = <&UIC0>;
interrupts = <0xa 0x4>;
};
GPIO0: gpio@ef600700 {
compatible = "ibm,gpio-405ez";
reg = <0xef600700 0x20>;
};
GPIO1: gpio@ef600800 {
compatible = "ibm,gpio-405ez";
reg = <0xef600800 0x20>;
};
EMAC0: ethernet@ef600900 {
device_type = "network";
compatible = "ibm,emac-405ez", "ibm,emac";
interrupt-parent = <&UIC0>;
interrupts = <
0x10 0x4 /* Ethernet */
0x11 0x4 /* Ethernet Wake up */>;
local-mac-address = [000000000000]; /* Filled in by wrapper */
reg = <0xef600900 0x70>;
mal-device = <&MAL0>;
mal-tx-channel = <0>;
mal-rx-channel = <0>;
cell-index = <0>;
max-frame-size = <1500>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
phy-mode = "mii";
phy-map = <0x0>;
};
CAN0: can@ef601000 {
compatible = "amcc,can-405ez";
reg = <0xef601000 0x620>;
interrupt-parent = <&UIC0>;
interrupts = <0x7 0x4>;
};
CAN1: can@ef601800 {
compatible = "amcc,can-405ez";
reg = <0xef601800 0x620>;
interrupt-parent = <&UIC0>;
interrupts = <0x8 0x4>;
};
cameleon@ef602000 {
compatible = "amcc,cameleon-405ez";
reg = <0xef602000 0x800>;
interrupt-parent = <&UIC0>;
interrupts = <0xb 0x4 0xc 0x4>;
};
ieee1588@ef602800 {
compatible = "amcc,ieee1588-405ez";
reg = <0xef602800 0x60>;
interrupt-parent = <&UIC0>;
interrupts = <0x4 0x4>;
/* This thing is a bit weird. It has its own UIC
* that it uses to generate snapshot triggers. We
* don't really support this device yet, and it needs
* work to figure this out.
*/
dcr-reg = <0xe0 0x9>;
};
usb@ef603000 {
compatible = "ohci-be";
reg = <0xef603000 0x80>;
interrupt-parent = <&UIC0>;
interrupts = <0xd 0x4 0xe 0x4>;
};
dac@ef603300 {
compatible = "amcc,dac-405ez";
reg = <0xef603300 0x40>;
interrupt-parent = <&UIC0>;
interrupts = <0x18 0x4>;
};
adc@ef603400 {
compatible = "amcc,adc-405ez";
reg = <0xef603400 0x40>;
interrupt-parent = <&UIC0>;
interrupts = <0x17 0x4>;
};
spi@ef603500 {
compatible = "amcc,spi-405ez";
reg = <0xef603500 0x100>;
interrupt-parent = <&UIC0>;
interrupts = <0x9 0x4>;
};
};
EBC0: ebc {
compatible = "ibm,ebc-405ez", "ibm,ebc";
dcr-reg = <0x12 0x2>;
#address-cells = <2>;
#size-cells = <1>;
clock-frequency = <0>; /* Filled in by wrapper */
};
};
chosen {
linux,stdout-path = "/plb/opb/serial@ef600300";
};
};
/*
* Device Tree Source for Netstal Maschinen HCU4
* based on the IBM Walnut
*
* Copyright 2008
* Niklaus Giger <niklaus.giger@member.fsf.org>
*
* Copyright 2007 IBM Corp.
* Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without
* any warranty of any kind, whether express or implied.
*/
/dts-v1/;
/ {
#address-cells = <0x1>;
#size-cells = <0x1>;
model = "netstal,hcu4";
compatible = "netstal,hcu4";
dcr-parent = <0x1>;
aliases {
ethernet0 = "/plb/opb/ethernet@ef600800";
serial0 = "/plb/opb/serial@ef600300";
};
cpus {
#address-cells = <0x1>;
#size-cells = <0x0>;
cpu@0 {
device_type = "cpu";
model = "PowerPC,405GPr";
reg = <0x0>;
clock-frequency = <0>; /* Filled in by U-Boot */
timebase-frequency = <0x0>; /* Filled in by U-Boot */
i-cache-line-size = <0x20>;
d-cache-line-size = <0x20>;
i-cache-size = <0x4000>;
d-cache-size = <0x4000>;
dcr-controller;
dcr-access-method = "native";
linux,phandle = <0x1>;
};
};
memory {
device_type = "memory";
reg = <0x0 0x0>; /* Filled in by U-Boot */
};
UIC0: interrupt-controller {
compatible = "ibm,uic";
interrupt-controller;
cell-index = <0x0>;
dcr-reg = <0xc0 0x9>;
#address-cells = <0x0>;
#size-cells = <0x0>;
#interrupt-cells = <0x2>;
linux,phandle = <0x2>;
};
plb {
compatible = "ibm,plb3";
#address-cells = <0x1>;
#size-cells = <0x1>;
ranges;
clock-frequency = <0x0>; /* Filled in by U-Boot */
SDRAM0: memory-controller {
compatible = "ibm,sdram-405gp";
dcr-reg = <0x10 0x2>;
};
MAL: mcmal {
compatible = "ibm,mcmal-405gp", "ibm,mcmal";
dcr-reg = <0x180 0x62>;
num-tx-chans = <0x1>;
num-rx-chans = <0x1>;
interrupt-parent = <0x2>;
interrupts = <0xb 0x4 0xc 0x4 0xa 0x4 0xd 0x4 0xe 0x4>;
linux,phandle = <0x3>;
};
POB0: opb {
compatible = "ibm,opb-405gp", "ibm,opb";
#address-cells = <0x1>;
#size-cells = <0x1>;
ranges = <0xef600000 0xef600000 0xa00000>;
dcr-reg = <0xa0 0x5>;
clock-frequency = <0x0>; /* Filled in by U-Boot */
UART0: serial@ef600300 {
device_type = "serial";
compatible = "ns16550";
reg = <0xef600300 0x8>;
virtual-reg = <0xef600300>;
clock-frequency = <0x0>;/* Filled in by U-Boot */
current-speed = <0>; /* Filled in by U-Boot */
interrupt-parent = <0x2>;
interrupts = <0x0 0x4>;
};
IIC: i2c@ef600500 {
compatible = "ibm,iic-405gp", "ibm,iic";
reg = <0xef600500 0x11>;
interrupt-parent = <0x2>;
interrupts = <0x2 0x4>;
};
GPIO: gpio@ef600700 {
compatible = "ibm,gpio-405gp";
reg = <0xef600700 0x20>;
};
EMAC: ethernet@ef600800 {
device_type = "network";
compatible = "ibm,emac-405gp", "ibm,emac";
interrupt-parent = <0x2>;
interrupts = <0xf 0x4 0x9 0x4>;
local-mac-address = [00 00 00 00 00 00];
reg = <0xef600800 0x70>;
mal-device = <0x3>;
mal-tx-channel = <0x0>;
mal-rx-channel = <0x0>;
cell-index = <0x0>;
max-frame-size = <0x5dc>;
rx-fifo-size = <0x1000>;
tx-fifo-size = <0x800>;
phy-mode = "rmii";
phy-map = <0x1>;
};
};
EBC0: ebc {
compatible = "ibm,ebc-405gp", "ibm,ebc";
dcr-reg = <0x12 0x2>;
#address-cells = <0x2>;
#size-cells = <0x1>;
clock-frequency = <0x0>; /* Filled in by U-Boot */
sram@0,0 {
reg = <0x0 0x0 0x80000>;
};
flash@0,80000 {
compatible = "jedec-flash";
bank-width = <0x1>;
reg = <0x0 0x80000 0x80000>;
#address-cells = <0x1>;
#size-cells = <0x1>;
partition@0 {
label = "OpenBIOS";
reg = <0x0 0x80000>;
read-only;
};
};
};
};
chosen {
linux,stdout-path = "/plb/opb/serial@ef600300";
};
};
......@@ -121,6 +121,14 @@
compatible = "dallas,ds1339";
reg = <0x68>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8315erdb",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
spi@7000 {
......
......@@ -60,7 +60,7 @@
};
bcsr@f8000000 {
device_type = "board-control";
compatible = "fsl,mpc8323mds-bcsr";
reg = <0xf8000000 0x8000>;
};
......
......@@ -83,6 +83,14 @@
interrupts = <15 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
interrupt-parent = <&ipic>;
};
};
spi@7000 {
......@@ -131,6 +139,14 @@
interrupt-parent = <&ipic>;
interrupts = <71 8>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8349emitx",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
usb@22000 {
......
......@@ -81,6 +81,14 @@
interrupts = <15 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
interrupt-parent = <&ipic>;
};
};
spi@7000 {
......
......@@ -49,7 +49,7 @@
};
bcsr@e2400000 {
device_type = "board-control";
compatible = "fsl,mpc8349mds-bcsr";
reg = <0xe2400000 0x8000>;
};
......
......@@ -69,7 +69,7 @@
};
bcsr@1,0 {
device_type = "board-control";
compatible = "fsl,mpc8360mds-bcsr";
reg = <1 0 0x8000>;
};
};
......
......@@ -121,6 +121,14 @@
compatible = "dallas,ds1339";
reg = <0x68>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8377erdb",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
i2c@3100 {
......
......@@ -121,6 +121,14 @@
compatible = "dallas,ds1339";
reg = <0x68>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8378erdb",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
i2c@3100 {
......
......@@ -121,6 +121,14 @@
compatible = "dallas,ds1339";
reg = <0x68>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8379erdb",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
i2c@3100 {
......
......@@ -109,7 +109,7 @@
reg = <0x0 0x80>;
cell-index = <0>;
interrupt-parent = <&mpic>;
interrupts = <14 0x2>;
interrupts = <20 2>;
};
dma-channel@80 {
compatible = "fsl,mpc8536-dma-channel",
......@@ -117,7 +117,7 @@
reg = <0x80 0x80>;
cell-index = <1>;
interrupt-parent = <&mpic>;
interrupts = <15 0x2>;
interrupts = <21 2>;
};
dma-channel@100 {
compatible = "fsl,mpc8536-dma-channel",
......@@ -125,7 +125,7 @@
reg = <0x100 0x80>;
cell-index = <2>;
interrupt-parent = <&mpic>;
interrupts = <16 0x2>;
interrupts = <22 2>;
};
dma-channel@180 {
compatible = "fsl,mpc8536-dma-channel",
......@@ -133,7 +133,7 @@
reg = <0x180 0x80>;
cell-index = <3>;
interrupt-parent = <&mpic>;
interrupts = <17 0x2>;
interrupts = <23 2>;
};
};
......@@ -180,7 +180,7 @@
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
model = "TSEC";
model = "eTSEC";
compatible = "gianfar";
reg = <0x24000 0x1000>;
local-mac-address = [ 00 00 00 00 00 00 ];
......@@ -193,7 +193,7 @@
enet1: ethernet@26000 {
cell-index = <1>;
device_type = "network";
model = "TSEC";
model = "eTSEC";
compatible = "gianfar";
reg = <0x26000 0x1000>;
local-mac-address = [ 00 00 00 00 00 00 ];
......
......@@ -52,7 +52,7 @@
};
bcsr@f8000000 {
device_type = "board-control";
compatible = "fsl,mpc8568mds-bcsr";
reg = <0xf8000000 0x8000>;
};
......
......@@ -13,8 +13,8 @@
/ {
model = "fsl,MPC8572DS";
compatible = "fsl,MPC8572DS";
#address-cells = <1>;
#size-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
aliases {
ethernet0 = &enet0;
......@@ -61,7 +61,6 @@
memory {
device_type = "memory";
reg = <0x0 0x0>; // Filled by U-Boot
};
soc8572@ffe00000 {
......@@ -69,8 +68,8 @@
#size-cells = <1>;
device_type = "soc";
compatible = "simple-bus";
ranges = <0x0 0xffe00000 0x100000>;
reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
ranges = <0x0 0 0xffe00000 0x100000>;
reg = <0 0xffe00000 0 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
bus-frequency = <0>; // Filled out by uboot.
memory-controller@2000 {
......@@ -351,10 +350,10 @@
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <0xffe08000 0x1000>;
reg = <0 0xffe08000 0 0x1000>;
bus-range = <0 255>;
ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <24 2>;
......@@ -561,10 +560,10 @@
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <0xffe09000 0x1000>;
reg = <0 0xffe09000 0 0x1000>;
bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <26 2>;
......@@ -598,10 +597,10 @@
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <0xffe0a000 0x1000>;
reg = <0 0xffe0a000 0 0x1000>;
bus-range = <0 255>;
ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <27 2>;
......
......@@ -105,6 +105,11 @@ static int fdt_wrapper_setprop(const void *devp, const char *name,
return check_err(rc);
}
static int fdt_wrapper_del_node(const void *devp)
{
return fdt_del_node(fdt, devp_offset(devp));
}
static void *fdt_wrapper_get_parent(const void *devp)
{
return offset_devp(fdt_parent_offset(fdt, devp_offset(devp)));
......@@ -165,6 +170,7 @@ static unsigned long fdt_wrapper_finalize(void)
void fdt_init(void *blob)
{
int err;
int bufsize;
dt_ops.finddevice = fdt_wrapper_finddevice;
dt_ops.getprop = fdt_wrapper_getprop;
......@@ -173,21 +179,21 @@ void fdt_init(void *blob)
dt_ops.create_node = fdt_wrapper_create_node;
dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value;
dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible;
dt_ops.del_node = fdt_wrapper_del_node;
dt_ops.get_path = fdt_wrapper_get_path;
dt_ops.finalize = fdt_wrapper_finalize;
/* Make sure the dt blob is the right version and so forth */
fdt = blob;
err = fdt_open_into(fdt, fdt, fdt_totalsize(blob));
if (err == -FDT_ERR_NOSPACE) {
int bufsize = fdt_totalsize(fdt) + 4;
buf = malloc(bufsize);
err = fdt_open_into(fdt, buf, bufsize);
}
bufsize = fdt_totalsize(fdt) + 4;
buf = malloc(bufsize);
if(!buf)
fatal("malloc failed. can't relocate the device tree\n\r");
err = fdt_open_into(fdt, buf, bufsize);
if (err != 0)
fatal("fdt_init(): %s\n\r", fdt_strerror(err));
if (buf)
fdt = buf;
fdt = buf;
}
......@@ -56,9 +56,19 @@ static struct addr_range prep_kernel(void)
if (platform_ops.vmlinux_alloc) {
addr = platform_ops.vmlinux_alloc(ei.memsize);
} else {
if ((unsigned long)_start < ei.memsize)
/*
* Check if the kernel image (without bss) would overwrite the
* bootwrapper. The device tree has been moved in fdt_init()
* to an area allocated with malloc() (somewhere past _end).
*/
if ((unsigned long)_start < ei.loadsize)
fatal("Insufficient memory for kernel at address 0!"
" (_start=%p)\n\r", _start);
" (_start=%p, uncomressed size=%08x)\n\r",
_start, ei.loadsize);
if ((unsigned long)_end < ei.memsize)
fatal("The final kernel image would overwrite the "
"device tree\n\r");
}
/* Finally, gunzip the kernel */
......
......@@ -40,6 +40,7 @@ struct dt_ops {
const int buflen);
int (*setprop)(const void *phandle, const char *name,
const void *buf, const int buflen);
int (*del_node)(const void *phandle);
void *(*get_parent)(const void *phandle);
/* The node must not already exist. */
void *(*create_node)(const void *parent, const char *name);
......@@ -126,6 +127,11 @@ static inline int setprop_str(void *devp, const char *name, const char *buf)
return -1;
}
static inline int del_node(const void *devp)
{
return dt_ops.del_node ? dt_ops.del_node(devp) : -1;
}
static inline void *get_parent(const char *devp)
{
return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL;
......
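For illustration, a minimal sketch of how early boot (bootwrapper) code could use the new del_node() helper to prune a device node that does not apply to the running board; the fixup name and node path are made up, while finddevice() and del_node() are the wrappers declared in ops.h.

    /* Hypothetical platform fixup: drop a node this board variant lacks. */
    static void example_prune_fixup(void)
    {
            void *devp;

            devp = finddevice("/plb/opb/can@ef601000");     /* example path */
            if (devp)
                    del_node(devp);
    }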
......@@ -235,7 +235,7 @@ memchr:
.globl memcmp
memcmp:
cmpwi 0,r5,0
blelr
ble 2f
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
......@@ -244,6 +244,8 @@ memcmp:
subf. r3,r0,r3
bdnzt 2,1b
blr
2: li r3,0
blr
/*
......
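A C-level sketch (not from the patch) of the behaviour the assembly fix restores: with a zero length, memcmp must report equality by returning 0 rather than leaving stale data in the return register; the function name is hypothetical.

    int memcmp_sketch(const void *a, const void *b, unsigned long n)
    {
            const unsigned char *p = a, *q = b;

            while (n--) {
                    int d = *p++ - *q++;
                    if (d)
                            return d;       /* first differing byte decides */
            }
            return 0;                       /* n == 0, or all bytes equal */
    }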
......@@ -306,11 +306,14 @@ fi
# post-processing needed for some platforms
case "$platform" in
pseries|chrp)
pseries)
${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote
$objbin/addnote "$ofile" "$ofile".rpanote
rm -r "$ofile".rpanote
;;
chrp)
$objbin/addnote -r c00000 "$ofile"
;;
coff)
${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile"
$objbin/hack-coff "$ofile"
......
This diff has been collapsed.
This diff has been collapsed.
......@@ -9,6 +9,12 @@
* Reserve to the end of the FWNMI area, see head_64.S */
#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
/*
* Used to differentiate between relocatable kdump kernel and other
* kernels
*/
#define KDUMP_SIGNATURE 0xfeed1234
#ifdef CONFIG_CRASH_DUMP
#define KDUMP_TRAMPOLINE_START 0x0100
......@@ -19,17 +25,18 @@
#endif /* CONFIG_CRASH_DUMP */
#ifndef __ASSEMBLY__
#ifdef CONFIG_CRASH_DUMP
extern unsigned long __kdump_flag;
#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
extern void reserve_kdump_trampoline(void);
extern void setup_kdump_trampoline(void);
#else /* !CONFIG_CRASH_DUMP */
#else
/* !CRASH_DUMP || RELOCATABLE */
static inline void reserve_kdump_trampoline(void) { ; }
static inline void setup_kdump_trampoline(void) { ; }
#endif
#endif /* CONFIG_CRASH_DUMP */
#endif /* __ASSEMBLY__ */
#endif /* __PPC64_KDUMP_H */
......@@ -77,6 +77,7 @@
#if defined(CONFIG_RELOCATABLE)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
#endif
......
......@@ -1277,6 +1277,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
{
/* 405EZ */
.pvr_mask = 0xffff0000,
.pvr_value = 0x41510000,
.cpu_name = "405EZ",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
......
......@@ -30,6 +30,7 @@
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
#ifndef CONFIG_RELOCATABLE
void __init reserve_kdump_trampoline(void)
{
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
......@@ -68,6 +69,7 @@ void __init setup_kdump_trampoline(void)
DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_RELOCATABLE */
/*
* Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
......
......@@ -97,6 +97,12 @@ __secondary_hold_spinloop:
__secondary_hold_acknowledge:
.llong 0x0
/* This flag is set by purgatory if we should be a kdump kernel. */
/* Do not move this variable as purgatory knows about it. */
.globl __kdump_flag
__kdump_flag:
.llong 0x0
#ifdef CONFIG_PPC_ISERIES
/*
* At offset 0x20, there is a pointer to iSeries LPAR data.
......@@ -1384,7 +1390,13 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
mr r3,r25
#ifdef CONFIG_CRASH_DUMP
ld r7,__kdump_flag-_stext(r26)
cmpldi cr0,r7,1 /* kdump kernel ? - stay where we are */
bne 1f
add r25,r25,r26
#endif
1: mr r3,r25
bl .relocate
#endif
......@@ -1398,11 +1410,26 @@ _STATIC(__after_prom_start)
li r3,0 /* target addr */
mr. r4,r26 /* In some cases the loader may */
beq 9f /* have already put us at zero */
lis r5,(copy_to_here - _stext)@ha
addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */
#ifdef CONFIG_CRASH_DUMP
/*
* Check whether the kernel has to run as a relocatable kernel, based on the
* variable __kdump_flag; if it is set, the kernel is treated as relocatable,
* otherwise it will be moved to PHYSICAL_START
*/
ld r7,__kdump_flag-_stext(r26)
cmpldi cr0,r7,1
bne 3f
li r5,__end_interrupts - _stext /* just copy interrupts */
b 5f
3:
#endif
lis r5,(copy_to_here - _stext)@ha
addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
bl .copy_and_flush /* copy the first n bytes */
/* this includes the code being */
/* executed here. */
......@@ -1411,15 +1438,15 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
addis r5,r26,(p_end - _stext)@ha
ld r5,(p_end - _stext)@l(r5) /* get _end */
bl .copy_and_flush /* copy the rest */
5: bl .copy_and_flush /* copy the rest */
9: b .start_here_multiplatform
p_end: .llong _end - _stext
/*
* Copy routine used to copy the kernel to start at physical address 0
* and flush and invalidate the caches as needed.
......
......@@ -458,6 +458,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
if (!__kdump_flag) {
/* Clear the table in case firmware left allocations in it */
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
return;
}
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
unsigned long index, tceval, tcecount = 0;
/* Reserve the existing mappings left by the first kernel. */
for (index = 0; index < tbl->it_size; index++) {
tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
/*
* Freed TCE entry contains 0x7fffffffffffffff on JS20
*/
if (tceval && (tceval != 0x7fffffffffffffffUL)) {
__set_bit(index, tbl->it_map);
tcecount++;
}
}
if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
printk(KERN_WARNING "TCE table is full; freeing ");
printk(KERN_WARNING "%d entries for the kdump boot\n",
KDUMP_MIN_TCE_ENTRIES);
for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
index < tbl->it_size; index++)
__clear_bit(index, tbl->it_map);
}
}
#endif
}
/*
* Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space.
......@@ -484,38 +520,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
tbl->it_largehint = tbl->it_halfpoint;
spin_lock_init(&tbl->it_lock);
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
unsigned long index;
unsigned long tceval;
unsigned long tcecount = 0;
/*
* Reserve the existing mappings left by the first kernel.
*/
for (index = 0; index < tbl->it_size; index++) {
tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
/*
* Freed TCE entry contains 0x7fffffffffffffff on JS20
*/
if (tceval && (tceval != 0x7fffffffffffffffUL)) {
__set_bit(index, tbl->it_map);
tcecount++;
}
}
if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
printk(KERN_WARNING "TCE table is full; ");
printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
KDUMP_MIN_TCE_ENTRIES);
for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
index < tbl->it_size; index++)
__clear_bit(index, tbl->it_map);
}
}
#else
/* Clear the hardware table in case firmware left allocations in it */
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif
iommu_table_clear(tbl);
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
......
......@@ -88,11 +88,13 @@ void __init reserve_crashkernel(void)
crash_size = crashk_res.end - crashk_res.start + 1;
#ifndef CONFIG_RELOCATABLE
if (crashk_res.start != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
#endif
crash_size = PAGE_ALIGN(crash_size);
crashk_res.end = crashk_res.start + crash_size - 1;
......
......@@ -255,11 +255,14 @@ static union thread_union kexec_stack
/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
void *image, void *control,
void (*clear_all)(void)) ATTRIB_NORET;
void (*clear_all)(void),
unsigned long kdump_flag) ATTRIB_NORET;
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
unsigned long kdump_flag = 0;
/* prepare control code if any */
/*
......@@ -270,8 +273,10 @@ void default_machine_kexec(struct kimage *image)
* using debugger IPI.
*/
if (crashing_cpu == -1)
kexec_prepare_cpus();
if (crashing_cpu == -1)
kexec_prepare_cpus();
else
kdump_flag = KDUMP_SIGNATURE;
/* switch to a statically allocated stack. Based on irq stack code.
* XXX: the task struct will likely be invalid once we do the copy!
......@@ -284,7 +289,7 @@ void default_machine_kexec(struct kimage *image)
*/
kexec_sequence(&kexec_stack, image->start, image,
page_address(image->control_code_page),
ppc_md.hpte_clear_all);
ppc_md.hpte_clear_all, kdump_flag);
/* NOTREACHED */
}
......@@ -312,11 +317,24 @@ static struct property kernel_end_prop = {
static void __init export_htab_values(void)
{
struct device_node *node;
struct property *prop;
node = of_find_node_by_path("/chosen");
if (!node)
return;
/* remove any stale properties so ours can be found */
prop = of_find_property(node, kernel_end_prop.name, NULL);
if (prop)
prom_remove_property(node, prop);
prop = of_find_property(node, htab_base_prop.name, NULL);
if (prop)
prom_remove_property(node, prop);
prop = of_find_property(node, htab_size_prop.name, NULL);
if (prop)
prom_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
kernel_end = __pa(_end);
prom_add_property(node, &kernel_end_prop);
......
......@@ -611,10 +611,12 @@ real_mode: /* assume normal blr return */
/*
* kexec_sequence(newstack, start, image, control, clear_all())
* kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag)
*
* does the grungy work with stack switching and real mode switches
* also does simple calls to other code
*
* kdump_flag says whether the next kernel should be a kdump kernel.
*/
_GLOBAL(kexec_sequence)
......@@ -647,7 +649,7 @@ _GLOBAL(kexec_sequence)
mr r29,r5 /* image (virt) */
mr r28,r6 /* control, unused */
mr r27,r7 /* clear_all() fn desc */
mr r26,r8 /* spare */
mr r26,r8 /* kdump flag */
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
......@@ -709,5 +711,6 @@ _GLOBAL(kexec_sequence)
mr r4,r30 # start, aka phys mem offset
mtlr 4
li r5,0
blr /* image->start(physid, image->start, 0); */
mr r6,r26 /* kdump_flag */
blr /* image->start(physid, image->start, 0, kdump_flag); */
#endif /* CONFIG_KEXEC */
......@@ -610,7 +610,8 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
vma->vm_pgoff = offset >> PAGE_SHIFT;
vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
......
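A hedged illustration of why the old |= form breaks under STRICT_MM_TYPECHECKS: pgprot_t is then a one-member struct, so bit operations have to go through pgprot_val()/__pgprot(), as the replacement line does. The definitions below are simplified stand-ins, not the kernel's exact ones.

    typedef struct { unsigned long pgprot; } pgprot_t;     /* simplified stand-in */
    #define pgprot_val(x)   ((x).pgprot)
    #define __pgprot(x)     ((pgprot_t) { (x) })

    /* prot |= _PAGE_NO_CACHE;  does not compile: no |= on a struct type */
    static inline pgprot_t pgprot_or(pgprot_t prot, unsigned long bits)
    {
            return __pgprot(pgprot_val(prot) | bits);       /* the safe pattern */
    }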
......@@ -487,67 +487,6 @@ static int __init prom_setprop(phandle node, const char *nodename,
return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
/* We can't use the standard versions because of RELOC headaches. */
#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
|| ('a' <= (c) && (c) <= 'f') \
|| ('A' <= (c) && (c) <= 'F'))
#define isdigit(c) ('0' <= (c) && (c) <= '9')
#define islower(c) ('a' <= (c) && (c) <= 'z')
#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
unsigned long prom_strtoul(const char *cp, const char **endp)
{
unsigned long result = 0, base = 10, value;
if (*cp == '0') {
base = 8;
cp++;
if (toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
while (isxdigit(*cp) &&
(value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
if (endp)
*endp = cp;
return result;
}
unsigned long prom_memparse(const char *ptr, const char **retptr)
{
unsigned long ret = prom_strtoul(ptr, retptr);
int shift = 0;
/*
* We can't use a switch here because GCC *may* generate a
* jump table which won't work, because we're not running at
* the address we're linked at.
*/
if ('G' == **retptr || 'g' == **retptr)
shift = 30;
if ('M' == **retptr || 'm' == **retptr)
shift = 20;
if ('K' == **retptr || 'k' == **retptr)
shift = 10;
if (shift) {
ret <<= shift;
(*retptr)++;
}
return ret;
}
/*
* Early parsing of the command line passed to the kernel, used for
* "mem=x" and the options that affect the iommu
......
......@@ -20,7 +20,7 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
_end enter_prom memcpy memset reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
reloc_got2 kernstart_addr"
reloc_got2 kernstart_addr memstart_addr"
NM="$1"
OBJ="$2"
......
......@@ -59,6 +59,7 @@
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include "setup.h"
......@@ -190,6 +191,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (ppc_md.show_cpuinfo != NULL)
ppc_md.show_cpuinfo(m);
#ifdef CONFIG_PPC32
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n",
(unsigned int)(total_memory / (1024 * 1024)));
#endif
return 0;
}
......
......@@ -235,8 +235,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
else
for (i = 0; i < 32 ; i++)
current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#else
#endif
return err;
}
......
......@@ -142,7 +142,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
speed = (clock / prescaler) / (divisor * 16);
/* sanity check */
if (speed < 0 || speed > (clock / 16))
if (speed > (clock / 16))
speed = 9600;
return speed;
......
......@@ -382,8 +382,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
printk(KERN_INFO "Huge page(16GB) memory: "
"addr = 0x%lX size = 0x%lX pages = %d\n",
phys_addr, block_size, expected_pages);
lmb_reserve(phys_addr, block_size * expected_pages);
add_gpage(phys_addr, block_size, expected_pages);
if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
lmb_reserve(phys_addr, block_size * expected_pages);
add_gpage(phys_addr, block_size, expected_pages);
}
return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
......
......@@ -116,6 +116,7 @@ static int __init get_active_region_work_fn(unsigned long start_pfn,
/*
* get_node_active_region - Return active region containing start_pfn
* Active range returned is empty if none found.
* @start_pfn: The page to return the region for.
* @node_ar: Returned set to the active region containing start_pfn
*/
......@@ -126,6 +127,7 @@ static void __init get_node_active_region(unsigned long start_pfn,
node_ar->nid = nid;
node_ar->start_pfn = start_pfn;
node_ar->end_pfn = start_pfn;
work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}
......@@ -526,12 +528,10 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
/*
* We use lmb_end_of_DRAM() in here instead of memory_limit because
* we've already adjusted it for the limit and it takes care of
* having memory holes below the limit.
* having memory holes below the limit. Also, in the case of
* iommu_is_off, memory_limit is not set but is implicitly enforced.
*/
if (! memory_limit)
return size;
if (start + size <= lmb_end_of_DRAM())
return size;
......@@ -933,18 +933,20 @@ void __init do_init_bootmem(void)
struct node_active_region node_ar;
get_node_active_region(start_pfn, &node_ar);
while (start_pfn < end_pfn) {
while (start_pfn < end_pfn &&
node_ar.start_pfn < node_ar.end_pfn) {
unsigned long reserve_size = size;
/*
* if reserved region extends past active region
* then trim size to active region
*/
if (end_pfn > node_ar.end_pfn)
size = (node_ar.end_pfn << PAGE_SHIFT)
reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
- (start_pfn << PAGE_SHIFT);
dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, size,
node_ar.nid);
dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
reserve_size, node_ar.nid);
reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
size, BOOTMEM_DEFAULT);
reserve_size, BOOTMEM_DEFAULT);
/*
* if reserved region is contained in the active region
* then done.
......@@ -959,6 +961,7 @@ void __init do_init_bootmem(void)
*/
start_pfn = node_ar.end_pfn;
physbase = start_pfn << PAGE_SHIFT;
size = size - reserve_size;
get_node_active_region(start_pfn, &node_ar);
}
......
......@@ -24,6 +24,11 @@
#define SKIP_GENERIC_SYNC 0
#define SYNC_START_ERROR -1
#define DO_GENERIC_SYNC 1
#define SPUS_PER_NODE 8
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
extern struct delayed_work spu_work;
extern int spu_prof_running;
struct spu_overlay_info { /* map of sections within an SPU overlay */
unsigned int vma; /* SPU virtual memory address from elf */
......@@ -62,6 +67,14 @@ struct vma_to_fileoffset_map { /* map of sections within an SPU program */
};
struct spu_buffer {
int last_guard_val;
int ctx_sw_seen;
unsigned long *buff;
unsigned int head, tail;
};
/* The three functions below are for maintaining and accessing
* the vma-to-fileoffset map.
*/
......
......@@ -23,12 +23,11 @@
static u32 *samples;
static int spu_prof_running;
int spu_prof_running;
static unsigned int profiling_interval;
#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY 4
#define SPUS_PER_NODE 8
#define SPU_PC_MASK 0xFFFF
......@@ -208,6 +207,7 @@ int start_spu_profiling(unsigned int cycles_reset)
spu_prof_running = 1;
hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
return 0;
}
......
......@@ -35,7 +35,102 @@ static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;
int last_guard_val[MAX_NUMNODES * 8];
struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
static unsigned max_spu_buff;
static void spu_buff_add(unsigned long int value, int spu)
{
/* spu buff is a circular buffer. Add entries to the
* head. Head is the index to store the next value.
* The buffer is full when there is one available entry
* in the queue, i.e. head and tail can't be equal.
* That way we can tell the difference between the
* buffer being full versus empty.
*
* ASSUMPTION: the buffer_lock is held when this function
* is called to lock the buffer, head and tail.
*/
int full = 1;
if (spu_buff[spu].head >= spu_buff[spu].tail) {
if ((spu_buff[spu].head - spu_buff[spu].tail)
< (max_spu_buff - 1))
full = 0;
} else if (spu_buff[spu].tail > spu_buff[spu].head) {
if ((spu_buff[spu].tail - spu_buff[spu].head)
> 1)
full = 0;
}
if (!full) {
spu_buff[spu].buff[spu_buff[spu].head] = value;
spu_buff[spu].head++;
if (spu_buff[spu].head >= max_spu_buff)
spu_buff[spu].head = 0;
} else {
/* From the user's perspective make the SPU buffer
* size management/overflow look like we are using
* per cpu buffers. The user uses the same
* per cpu parameter to adjust the SPU buffer size.
* Increment the sample_lost_overflow to inform
* the user the buffer size needs to be increased.
*/
oprofile_cpu_buffer_inc_smpl_lost();
}
}
/* This function copies the per SPU buffers to the
* OProfile kernel buffer.
*/
void sync_spu_buff(void)
{
int spu;
unsigned long flags;
int curr_head;
for (spu = 0; spu < num_spu_nodes; spu++) {
/* In case there was an issue and the buffer didn't
* get created skip it.
*/
if (spu_buff[spu].buff == NULL)
continue;
/* Hold the lock to make sure the head/tail
* doesn't change while spu_buff_add() is
* deciding if the buffer is full or not.
* Being a little paranoid.
*/
spin_lock_irqsave(&buffer_lock, flags);
curr_head = spu_buff[spu].head;
spin_unlock_irqrestore(&buffer_lock, flags);
/* Transfer the current contents to the kernel buffer.
* data can still be added to the head of the buffer.
*/
oprofile_put_buff(spu_buff[spu].buff,
spu_buff[spu].tail,
curr_head, max_spu_buff);
spin_lock_irqsave(&buffer_lock, flags);
spu_buff[spu].tail = curr_head;
spin_unlock_irqrestore(&buffer_lock, flags);
}
}
static void wq_sync_spu_buff(struct work_struct *work)
{
/* move data from spu buffers to kernel buffer */
sync_spu_buff();
/* only reschedule if profiling is not done */
if (spu_prof_running)
schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}
/* Container for caching information about an active SPU task. */
struct cached_info {
......@@ -305,14 +400,21 @@ static int process_context_switch(struct spu *spu, unsigned long objectId)
/* Record context info in event buffer */
spin_lock_irqsave(&buffer_lock, flags);
add_event_entry(ESCAPE_CODE);
add_event_entry(SPU_CTX_SWITCH_CODE);
add_event_entry(spu->number);
add_event_entry(spu->pid);
add_event_entry(spu->tgid);
add_event_entry(app_dcookie);
add_event_entry(spu_cookie);
add_event_entry(offset);
spu_buff_add(ESCAPE_CODE, spu->number);
spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
spu_buff_add(spu->number, spu->number);
spu_buff_add(spu->pid, spu->number);
spu_buff_add(spu->tgid, spu->number);
spu_buff_add(app_dcookie, spu->number);
spu_buff_add(spu_cookie, spu->number);
spu_buff_add(offset, spu->number);
/* Set flag to indicate SPU PC data can now be written out. If
* the SPU program counter data is seen before an SPU context
* record is seen, the postprocessing will fail.
*/
spu_buff[spu->number].ctx_sw_seen = 1;
spin_unlock_irqrestore(&buffer_lock, flags);
smp_wmb(); /* ensure spu event buffer updates are written */
/* don't want entries intermingled... */
......@@ -360,6 +462,47 @@ static int number_of_online_nodes(void)
return nodes;
}
static int oprofile_spu_buff_create(void)
{
int spu;
max_spu_buff = oprofile_get_cpu_buffer_size();
for (spu = 0; spu < num_spu_nodes; spu++) {
/* create circular buffers to store the data in.
* use locks to manage accessing the buffers
*/
spu_buff[spu].head = 0;
spu_buff[spu].tail = 0;
/*
* Create a buffer for each SPU. Can't reliably
* create a single buffer for all spus due to not
* enough contiguous kernel memory.
*/
spu_buff[spu].buff = kzalloc((max_spu_buff
* sizeof(unsigned long)),
GFP_KERNEL);
if (!spu_buff[spu].buff) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: oprofile_spu_buff_create "
"failed to allocate spu buffer %d.\n",
__func__, __LINE__, spu);
/* release the spu buffers that have been allocated */
while (spu >= 0) {
kfree(spu_buff[spu].buff);
spu_buff[spu].buff = 0;
spu--;
}
return -ENOMEM;
}
}
return 0;
}
/* The main purpose of this function is to synchronize
* OProfile with SPUFS by registering to be notified of
* SPU task switches.
......@@ -372,20 +515,35 @@ static int number_of_online_nodes(void)
*/
int spu_sync_start(void)
{
int k;
int spu;
int ret = SKIP_GENERIC_SYNC;
int register_ret;
unsigned long flags = 0;
spu_prof_num_nodes = number_of_online_nodes();
num_spu_nodes = spu_prof_num_nodes * 8;
INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
/* create buffer for storing the SPU data to put in
* the kernel buffer.
*/
ret = oprofile_spu_buff_create();
if (ret)
goto out;
spin_lock_irqsave(&buffer_lock, flags);
add_event_entry(ESCAPE_CODE);
add_event_entry(SPU_PROFILING_CODE);
add_event_entry(num_spu_nodes);
for (spu = 0; spu < num_spu_nodes; spu++) {
spu_buff_add(ESCAPE_CODE, spu);
spu_buff_add(SPU_PROFILING_CODE, spu);
spu_buff_add(num_spu_nodes, spu);
}
spin_unlock_irqrestore(&buffer_lock, flags);
for (spu = 0; spu < num_spu_nodes; spu++) {
spu_buff[spu].ctx_sw_seen = 0;
spu_buff[spu].last_guard_val = 0;
}
/* Register for SPU events */
register_ret = spu_switch_event_register(&spu_active);
if (register_ret) {
......@@ -393,8 +551,6 @@ int spu_sync_start(void)
goto out;
}
for (k = 0; k < (MAX_NUMNODES * 8); k++)
last_guard_val[k] = 0;
pr_debug("spu_sync_start -- running.\n");
out:
return ret;
......@@ -446,13 +602,20 @@ void spu_sync_buffer(int spu_num, unsigned int *samples,
* use. We need to discard samples taken during the time
* period in which an overlay occurs (i.e., guard value changes).
*/
if (grd_val && grd_val != last_guard_val[spu_num]) {
last_guard_val[spu_num] = grd_val;
if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
spu_buff[spu_num].last_guard_val = grd_val;
/* Drop the rest of the samples. */
break;
}
add_event_entry(file_offset | spu_num_shifted);
/* We must ensure that the SPU context switch has been written
* out before samples for the SPU. Otherwise, the SPU context
* information is not available and the postprocessing of the
* SPU PC will fail with no available anonymous map information.
*/
if (spu_buff[spu_num].ctx_sw_seen)
spu_buff_add((file_offset | spu_num_shifted),
spu_num);
}
spin_unlock(&buffer_lock);
out:
......@@ -463,20 +626,41 @@ void spu_sync_buffer(int spu_num, unsigned int *samples,
int spu_sync_stop(void)
{
unsigned long flags = 0;
int ret = spu_switch_event_unregister(&spu_active);
if (ret) {
int ret;
int k;
ret = spu_switch_event_unregister(&spu_active);
if (ret)
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: spu_switch_event_unregister returned %d\n",
__func__, __LINE__, ret);
goto out;
}
"%s, line %d: spu_switch_event_unregister " \
"returned %d\n",
__func__, __LINE__, ret);
/* flush any remaining data in the per SPU buffers */
sync_spu_buff();
spin_lock_irqsave(&cache_lock, flags);
ret = release_cached_info(RELEASE_ALL);
spin_unlock_irqrestore(&cache_lock, flags);
out:
/* remove scheduled work queue item rather than waiting
* for every queued entry to execute. Then flush pending
* system wide buffer to event buffer.
*/
cancel_delayed_work(&spu_work);
for (k = 0; k < num_spu_nodes; k++) {
spu_buff[k].ctx_sw_seen = 0;
/*
* spu_buff[k].buff will be NULL if there was a problem
* allocating the buffer. Only delete if it exists.
*/
kfree(spu_buff[k].buff);
spu_buff[k].buff = 0;
}
pr_debug("spu_sync_stop -- done.\n");
return ret;
}
......@@ -14,6 +14,15 @@
# help
# This option enables support for the CPCI405 board.
config ACADIA
bool "Acadia"
depends on 40x
default n
select PPC40x_SIMPLE
select 405EZ
help
This option enables support for the AMCC 405EZ Acadia evaluation board.
config EP405
bool "EP405/EP405PC"
depends on 40x
......@@ -23,6 +32,14 @@ config EP405
help
This option enables support for the EP405/EP405PC boards.
config HCU4
bool "Hcu4"
depends on 40x
default y
select 405GPR
help
This option enables support for the Netstal Maschinen HCU4 board.
config KILAUEA
bool "Kilauea"
depends on 40x
......@@ -93,6 +110,13 @@ config XILINX_VIRTEX_GENERIC_BOARD
Most Virtex designs should use this unless it needs to do some
special configuration at board probe time.
config PPC40x_SIMPLE
bool "Simple PowerPC 40x board support"
depends on 40x
default n
help
This option enables the simple PowerPC 40x platform support.
# 40x specific CPU modules, selected based on the board above.
config NP405H
bool
......@@ -118,6 +142,12 @@ config 405EX
select IBM_NEW_EMAC_EMAC4
select IBM_NEW_EMAC_RGMII
config 405EZ
bool
select IBM_NEW_EMAC_NO_FLOW_CTRL
select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
select IBM_NEW_EMAC_MAL_COMMON_ERR
config 405GPR
bool
......@@ -139,6 +169,14 @@ config STB03xxx
select IBM405_ERR77
select IBM405_ERR51
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 40x
select ARCH_REQUIRE_GPIOLIB
select GENERIC_GPIO
help
Enable gpiolib support for ppc40x based boards
# 40x errata/workaround config symbols, selected by the CPU models above
# All 405-based cores up until the 405GPR and 405EP have this errata.
......
obj-$(CONFIG_KILAUEA) += kilauea.o
obj-$(CONFIG_HCU4) += hcu4.o
obj-$(CONFIG_MAKALU) += makalu.o
obj-$(CONFIG_WALNUT) += walnut.o
obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
obj-$(CONFIG_EP405) += ep405.o
obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o
/*
* Architecture- / platform-specific boot-time initialization code for
* IBM PowerPC 4xx based boards. Adapted from original
* code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
* <dan@net4x.com>.
*
* Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 IBM Corporation
* Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* 2002 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
static __initdata struct of_device_id hcu4_of_bus[] = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init hcu4_device_probe(void)
{
of_platform_bus_probe(NULL, hcu4_of_bus, NULL);
return 0;
}
machine_device_initcall(hcu4, hcu4_device_probe);
static int __init hcu4_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "netstal,hcu4"))
return 0;
return 1;
}
define_machine(hcu4) {
.name = "HCU4",
.probe = hcu4_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
/*
* Generic PowerPC 40x platform support
*
* Copyright 2008 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*
* This implements simple platform support for PowerPC 40x chips. This is
* mostly used for eval boards or other simple and "generic" 40x boards. If
* your board has custom functions or hardware, then you will likely want to
* implement your own board.c file to accommodate it.
*/
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
#include <linux/init.h>
#include <linux/of_platform.h>
static __initdata struct of_device_id ppc40x_of_bus[] = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{ .compatible = "simple-bus", },
{},
};
static int __init ppc40x_device_probe(void)
{
of_platform_bus_probe(NULL, ppc40x_of_bus, NULL);
return 0;
}
machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
/* This is the list of boards that can be supported by this simple
* platform code. This does _not_ mean the boards are compatible,
* as they most certainly are not from a device tree perspective.
* However, their differences are handled by the device tree and the
* drivers and therefore they don't need custom board support files.
*
* Again, if your board needs to do things differently then create a
* board.c file for it rather than adding it to this list.
*/
static char *board[] __initdata = {
"amcc,acadia"
};
static int __init ppc40x_probe(void)
{
unsigned long root = of_get_flat_dt_root();
int i = 0;
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
return 1;
}
}
return 0;
}
define_machine(ppc40x_simple) {
.name = "PowerPC 40x Platform",
.probe = ppc40x_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
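As the comment above notes, supporting another board through this simple platform only takes a Kconfig entry that selects PPC40x_SIMPLE plus one more compatible string in board[]; a minimal sketch (the "acme,widget" string is an invented example, not a board in this series):

	static char *board[] __initdata = {
		"amcc,acadia",
		"acme,widget",	/* hypothetical board, matched against the device tree root */
	};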
......@@ -167,6 +167,14 @@ config PPC44x_SIMPLE
help
This option enables the simple PowerPC 44x platform support.
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 44x
select ARCH_REQUIRE_GPIOLIB
select GENERIC_GPIO
help
Enable gpiolib support for ppc440 based boards
# 44x specific CPU modules, selected based on the board above.
config 440EP
bool
......
......@@ -99,11 +99,14 @@ mpc5200_setup_xlb_arbiter(void)
out_be32(&xlb->master_pri_enable, 0xff);
out_be32(&xlb->master_priority, 0x11111111);
/* Disable XLB pipelining
/*
* Disable XLB pipelining
* (cf. errata 292. We could do this only just before ATA PIO
* transaction and re-enable it afterwards ...)
* Not needed on MPC5200B.
*/
out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
iounmap(xlb);
}
......
......@@ -193,7 +193,6 @@ static void __init ksi8560_setup_arch(void)
static void ksi8560_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -215,9 +214,6 @@ static void ksi8560_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -207,7 +207,6 @@ static void __init mpc85xx_ads_setup_arch(void)
static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -219,9 +218,6 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -307,7 +307,6 @@ static void __init mpc85xx_cds_setup_arch(void)
static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -320,9 +319,6 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
......
......@@ -136,7 +136,6 @@ static void __init sbc8548_setup_arch(void)
static void sbc8548_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -149,9 +148,6 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -194,7 +194,6 @@ static void __init sbc8560_setup_arch(void)
static void sbc8560_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -206,9 +205,6 @@ static void sbc8560_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -130,7 +130,6 @@ static void __init stx_gp3_setup_arch(void)
static void stx_gp3_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -142,9 +141,6 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -138,7 +138,6 @@ static void __init tqm85xx_setup_arch(void)
static void tqm85xx_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
uint memsize = total_memory;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
......@@ -150,9 +149,6 @@ static void tqm85xx_show_cpuinfo(struct seq_file *m)
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static struct of_device_id __initdata of_bus_ids[] = {
......
......@@ -127,7 +127,6 @@ static unsigned int gef_sbc610_get_fpga_rev(void)
static void gef_sbc610_show_cpuinfo(struct seq_file *m)
{
uint memsize = total_memory;
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n");
......@@ -137,7 +136,6 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m)
seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
......
......@@ -101,13 +101,11 @@ mpc86xx_hpcn_setup_arch(void)
static void
mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
{
uint memsize = total_memory;
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
......
......@@ -63,13 +63,11 @@ sbc8641_setup_arch(void)
static void
sbc8641_show_cpuinfo(struct seq_file *m)
{
uint memsize = total_memory;
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: Wind River Systems\n");
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
......
......@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
#include <asm/kdump.h>
#include "ras.h"
......@@ -111,9 +112,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
int ret = -ENOMEM;
unsigned long addr;
#ifdef CONFIG_CRASH_DUMP
rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
#endif
if (__kdump_flag)
rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
......
......@@ -54,8 +54,8 @@
#endif
/*
* The primary thread of each non-boot processor is recorded here before
* smp init.
* The primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
static cpumask_t of_spin_map;
......@@ -208,11 +208,7 @@ void __init smp_init_cell(void)
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (i % 2 == 0)
/*
* Even-numbered logical cpus correspond to
* primary threads.
*/
if (cpu_thread_in_core(i) == 0)
cpu_set(i, of_spin_map);
}
} else {
......
......@@ -548,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
int ret;
struct spu_context *ctx = file->private_data;
/* pre-check for file position: if we'd return EOF, there's no point
* causing a deschedule */
if (*pos >= sizeof(ctx->csa.lscsa->gprs))
return 0;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
......@@ -2426,38 +2431,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int rc;
rc = spu_acquire(ctx);
if (rc)
return rc;
/*
* We (ab-)use the mapping_lock here because it serves the similar
* purpose for synchronizing open/close elsewhere. Maybe it should
* be renamed eventually.
*/
mutex_lock(&ctx->mapping_lock);
if (ctx->switch_log) {
spin_lock(&ctx->switch_log->lock);
ctx->switch_log->head = 0;
ctx->switch_log->tail = 0;
spin_unlock(&ctx->switch_log->lock);
} else {
/*
* We allocate the switch log data structures on first open.
* They will never be free because we assume a context will
* be traced until it goes away.
*/
ctx->switch_log = kzalloc(sizeof(struct switch_log) +
SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
GFP_KERNEL);
if (!ctx->switch_log)
goto out;
spin_lock_init(&ctx->switch_log->lock);
init_waitqueue_head(&ctx->switch_log->wait);
rc = -EBUSY;
goto out;
}
mutex_unlock(&ctx->mapping_lock);
ctx->switch_log = kmalloc(sizeof(struct switch_log) +
SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
GFP_KERNEL);
if (!ctx->switch_log) {
rc = -ENOMEM;
goto out;
}
ctx->switch_log->head = ctx->switch_log->tail = 0;
init_waitqueue_head(&ctx->switch_log->wait);
rc = 0;
out:
spu_release(ctx);
return rc;
}
static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int rc;
rc = spu_acquire(ctx);
if (rc)
return rc;
kfree(ctx->switch_log);
ctx->switch_log = NULL;
spu_release(ctx);
return 0;
out:
mutex_unlock(&ctx->mapping_lock);
return -ENOMEM;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
......@@ -2485,42 +2501,54 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
if (!buf || len < 0)
return -EINVAL;
error = spu_acquire(ctx);
if (error)
return error;
while (cnt < len) {
char tbuf[128];
int width;
if (file->f_flags & O_NONBLOCK) {
if (spufs_switch_log_used(ctx) <= 0)
return cnt ? cnt : -EAGAIN;
} else {
/* Wait for data in buffer */
error = wait_event_interruptible(ctx->switch_log->wait,
spufs_switch_log_used(ctx) > 0);
if (error)
if (spufs_switch_log_used(ctx) == 0) {
if (cnt > 0) {
/* If there's data ready to go, we can
* just return straight away */
break;
} else if (file->f_flags & O_NONBLOCK) {
error = -EAGAIN;
break;
}
spin_lock(&ctx->switch_log->lock);
if (ctx->switch_log->head == ctx->switch_log->tail) {
/* multiple readers race? */
spin_unlock(&ctx->switch_log->lock);
continue;
} else {
/* spufs_wait will drop the mutex and
* re-acquire, but since we're in read(), the
* file cannot be _released (and so
* ctx->switch_log is stable).
*/
error = spufs_wait(ctx->switch_log->wait,
spufs_switch_log_used(ctx) > 0);
/* On error, spufs_wait returns without the
* state mutex held */
if (error)
return error;
/* We may have had entries read from underneath
* us while we dropped the mutex in spufs_wait,
* so re-check */
if (spufs_switch_log_used(ctx) == 0)
continue;
}
}
width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
if (width < len) {
if (width < len)
ctx->switch_log->tail =
(ctx->switch_log->tail + 1) %
SWITCH_LOG_BUFSIZE;
}
spin_unlock(&ctx->switch_log->lock);
/*
* If the record is greater than space available return
* partial buffer (so far)
*/
if (width >= len)
else
/* If the record is greater than space available return
* partial buffer (so far) */
break;
error = copy_to_user(buf + cnt, tbuf, width);
......@@ -2529,6 +2557,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
cnt += width;
}
spu_release(ctx);
return cnt == 0 ? error : cnt;
}
......@@ -2537,29 +2567,41 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
struct inode *inode = file->f_path.dentry->d_inode;
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
int rc;
poll_wait(file, &ctx->switch_log->wait, wait);
rc = spu_acquire(ctx);
if (rc)
return rc;
if (spufs_switch_log_used(ctx) > 0)
mask |= POLLIN;
spu_release(ctx);
return mask;
}
static const struct file_operations spufs_switch_log_fops = {
.owner = THIS_MODULE,
.open = spufs_switch_log_open,
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
.owner = THIS_MODULE,
.open = spufs_switch_log_open,
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
.release = spufs_switch_log_release,
};
/**
* Log a context switch event to a switch log reader.
*
* Must be called with ctx->state_mutex held.
*/
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val)
{
if (!ctx->switch_log)
return;
spin_lock(&ctx->switch_log->lock);
if (spufs_switch_log_avail(ctx) > 1) {
struct switch_log_entry *p;
......@@ -2573,7 +2615,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
ctx->switch_log->head =
(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
}
spin_unlock(&ctx->switch_log->lock);
wake_up(&ctx->switch_log->wait);
}
......
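For context, the switch_log file created above is read from userspace like any other spufs file; a minimal sketch of a reader follows (the /spu mount point and the "ctx" directory name are assumptions, not part of this series):

	/* Hedged userspace sketch: tail the spufs switch_log file.
	 * Assumes spufs is mounted at /spu and a context directory "ctx" exists.
	 * Only one reader may hold the file open at a time (-EBUSY otherwise).
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd = open("/spu/ctx/switch_log", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)	/* blocks until an entry is logged */
			fwrite(buf, 1, (size_t)n, stdout);
		close(fd);
		return 0;
	}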
......@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
spu_release(ctx);
if (signal_pending(current))
......@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);
spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
ctx->stats.libassist++;
......
......@@ -312,6 +312,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
*/
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
/*
* "available_spus" counts how many spus are not potentially
* going to be used by other affinity gangs whose reference
* context is already in place. Although this code seeks to
* avoid having affinity gangs whose total number of
* contexts exceeds the number of spus in the node,
* this may happen sporadically. In this case, available_spus
* becomes negative, which is harmless.
*/
int available_spus;
node = (node < MAX_NUMNODES) ? node : 0;
......@@ -321,12 +330,10 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
available_spus = 0;
mutex_lock(&cbe_spu_info[node].list_mutex);
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
if (spu->ctx && spu->ctx->gang
&& spu->ctx->aff_offset == 0)
available_spus -=
(spu->ctx->gang->contexts - 1);
else
available_spus++;
if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
&& spu->ctx->gang->aff_ref_spu)
available_spus -= spu->ctx->gang->contexts;
available_spus++;
}
if (available_spus < ctx->gang->contexts) {
mutex_unlock(&cbe_spu_info[node].list_mutex);
......@@ -437,6 +444,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
if (ctx->gang)
/*
* If ctx->gang->aff_sched_count is positive, SPU affinity is
* being considered in this gang. Using atomic_dec_if_positive
* allows us to skip an explicit check for affinity in this gang
*/
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
spu_switch_notify(spu, NULL);
......
......@@ -65,7 +65,6 @@ enum {
};
struct switch_log {
spinlock_t lock;
wait_queue_head_t wait;
unsigned long head;
unsigned long tail;
......
......@@ -40,6 +40,7 @@ static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
static ktime_t sputrace_start;
static unsigned long sputrace_head, sputrace_tail;
static struct sputrace *sputrace_log;
static int sputrace_logging;
static int sputrace_used(void)
{
......@@ -79,6 +80,11 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
char tbuf[128];
int width;
/* If we have data ready to return, don't block waiting
* for more */
if (cnt > 0 && sputrace_used() == 0)
break;
error = wait_event_interruptible(sputrace_wait,
sputrace_used() > 0);
if (error)
......@@ -109,24 +115,49 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
static int sputrace_open(struct inode *inode, struct file *file)
{
int rc;
spin_lock(&sputrace_lock);
if (sputrace_logging) {
rc = -EBUSY;
goto out;
}
sputrace_logging = 1;
sputrace_head = sputrace_tail = 0;
sputrace_start = ktime_get();
rc = 0;
out:
spin_unlock(&sputrace_lock);
return rc;
}
static int sputrace_release(struct inode *inode, struct file *file)
{
spin_lock(&sputrace_lock);
sputrace_logging = 0;
spin_unlock(&sputrace_lock);
return 0;
}
static const struct file_operations sputrace_fops = {
.owner = THIS_MODULE,
.open = sputrace_open,
.read = sputrace_read,
.owner = THIS_MODULE,
.open = sputrace_open,
.read = sputrace_read,
.release = sputrace_release,
};
static void sputrace_log_item(const char *name, struct spu_context *ctx,
struct spu *spu)
{
spin_lock(&sputrace_lock);
if (!sputrace_logging) {
spin_unlock(&sputrace_lock);
return;
}
if (sputrace_avail() > 1) {
struct sputrace *t = sputrace_log + sputrace_head;
......
......@@ -116,10 +116,7 @@ static void c2k_restart(char *cmd)
void c2k_show_cpuinfo(struct seq_file *m)
{
uint memsize = total_memory;
seq_printf(m, "Vendor\t\t: GEFanuc\n");
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING);
}
......
......@@ -119,10 +119,7 @@ static void prpmc2800_restart(char *cmd)
void prpmc2800_show_cpuinfo(struct seq_file *m)
{
uint memsize = total_memory;
seq_printf(m, "Vendor\t\t: Motorola\n");
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING);
}
......
......@@ -22,6 +22,12 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
int ret;
start_pfn = base >> PAGE_SHIFT;
if (!pfn_valid(start_pfn)) {
lmb_remove(base, lmb_size);
return 0;
}
zone = page_zone(pfn_to_page(start_pfn));
/*
......
......@@ -44,6 +44,7 @@
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/kdump.h>
#include "plpar_wrappers.h"
......@@ -291,9 +292,8 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_base = (unsigned long)__va(*basep);
#ifndef CONFIG_CRASH_DUMP
memset((void *)tbl->it_base, 0, *sizep);
#endif
if (!__kdump_flag)
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
......
......@@ -52,8 +52,8 @@
/*
* The primary thread of each non-boot processor is recorded here before
* smp init.
* The primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
static cpumask_t of_spin_map;
......@@ -161,8 +161,7 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
static int smp_pSeries_cpu_bootable(unsigned int nr)
{
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it. Odd-numbered
* cpus are assumed to be secondary threads.
* during boot if the user requests it.
*/
if (system_state < SYSTEM_RUNNING &&
cpu_has_feature(CPU_FTR_SMT) &&
......@@ -199,11 +198,7 @@ static void __init smp_init_pseries(void)
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (i % 2 == 0)
/*
* Even-numbered logical cpus correspond to
* primary threads.
*/
if (cpu_thread_in_core(i) == 0)
cpu_set(i, of_spin_map);
}
} else {
......
......@@ -37,6 +37,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_4xx) += ppc4xx_pci.o
endif
obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
obj-$(CONFIG_CPM) += cpm_common.o
obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o
......
/*
* PPC4xx gpio driver
*
* Copyright (c) 2008 Harris Corporation
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Steve Falco <sfalco@harris.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/types.h>
#define GPIO_MASK(gpio) (0x80000000 >> (gpio))
#define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2))
/* Physical GPIO register layout */
struct ppc4xx_gpio {
__be32 or;
__be32 tcr;
__be32 osrl;
__be32 osrh;
__be32 tsrl;
__be32 tsrh;
__be32 odr;
__be32 ir;
__be32 rr1;
__be32 rr2;
__be32 rr3;
__be32 reserved1;
__be32 isr1l;
__be32 isr1h;
__be32 isr2l;
__be32 isr2h;
__be32 isr3l;
__be32 isr3h;
};
struct ppc4xx_gpio_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
};
/*
* GPIO LIB API implementation for GPIOs
*
* There are a maximum of 32 gpios in each gpio controller.
*/
static inline struct ppc4xx_gpio_chip *
to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc)
{
return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc);
}
static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
return in_be32(&regs->ir) & GPIO_MASK(gpio);
}
static inline void
__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
if (val)
setbits32(&regs->or, GPIO_MASK(gpio));
else
clrbits32(&regs->or, GPIO_MASK(gpio));
}
static void
ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__ppc4xx_gpio_set(gc, gpio, val);
spin_unlock_irqrestore(&chip->lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
}
static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
/* Disable open-drain function */
clrbits32(&regs->odr, GPIO_MASK(gpio));
/* Float the pin */
clrbits32(&regs->tcr, GPIO_MASK(gpio));
/* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */
if (gpio < 16) {
clrbits32(&regs->osrl, GPIO_MASK2(gpio));
clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
} else {
clrbits32(&regs->osrh, GPIO_MASK2(gpio));
clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
}
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
static int
ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
/* First set initial value */
__ppc4xx_gpio_set(gc, gpio, val);
/* Disable open-drain function */
clrbits32(&regs->odr, GPIO_MASK(gpio));
/* Drive the pin */
setbits32(&regs->tcr, GPIO_MASK(gpio));
/* Bits 0-15 use TSRL, bits 16-31 use TSRH */
if (gpio < 16) {
clrbits32(&regs->osrl, GPIO_MASK2(gpio));
clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
} else {
clrbits32(&regs->osrh, GPIO_MASK2(gpio));
clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
}
spin_unlock_irqrestore(&chip->lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
return 0;
}
static int __init ppc4xx_add_gpiochips(void)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") {
int ret;
struct ppc4xx_gpio_chip *ppc4xx_gc;
struct of_mm_gpio_chip *mm_gc;
struct of_gpio_chip *of_gc;
struct gpio_chip *gc;
ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL);
if (!ppc4xx_gc) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&ppc4xx_gc->lock);
mm_gc = &ppc4xx_gc->mm_gc;
of_gc = &mm_gc->of_gc;
gc = &of_gc->gc;
of_gc->gpio_cells = 2;
gc->ngpio = 32;
gc->direction_input = ppc4xx_gpio_dir_in;
gc->direction_output = ppc4xx_gpio_dir_out;
gc->get = ppc4xx_gpio_get;
gc->set = ppc4xx_gpio_set;
ret = of_mm_gpiochip_add(np, mm_gc);
if (ret)
goto err;
continue;
err:
pr_err("%s: registration failed with status %d\n",
np->full_name, ret);
kfree(ppc4xx_gc);
/* try others anyway */
}
return 0;
}
arch_initcall(ppc4xx_add_gpiochips);
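Once ppc4xx_add_gpiochips() has registered a controller with gpiolib, consumers drive the pins through the generic GPIO calls; a minimal sketch (the GPIO number 240 and the "example-led" label are invented for illustration, real numbers depend on gpiolib's base assignment):

	/* Hedged sketch: toggling one pin through the generic gpiolib API.
	 * The number 240 is arbitrary; nothing in this driver defines it.
	 */
	#include <linux/gpio.h>
	#include <linux/init.h>

	static int __init example_gpio_demo(void)
	{
		int ret = gpio_request(240, "example-led");

		if (ret)
			return ret;
		gpio_direction_output(240, 1);	/* dispatches to ppc4xx_gpio_dir_out() */
		gpio_set_value(240, 0);		/* dispatches to ppc4xx_gpio_set() */
		gpio_free(240);
		return 0;
	}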
(one file diff collapsed in this view)
......@@ -27,6 +27,7 @@
#ifndef HVC_CONSOLE_H
#define HVC_CONSOLE_H
#include <linux/kref.h>
#include <linux/tty.h>
/*
* This is the max number of console adapters that can/will be found as
......@@ -56,6 +57,8 @@ struct hvc_struct {
struct hv_ops *ops;
int irq_requested;
int data;
struct winsize ws;
struct work_struct tty_resize;
struct list_head next;
struct kref kref; /* ref count & hvc_struct lifetime */
};
......@@ -65,9 +68,10 @@ struct hv_ops {
int (*get_chars)(uint32_t vtermno, char *buf, int count);
int (*put_chars)(uint32_t vtermno, const char *buf, int count);
/* Callbacks for notification. Called in open and close */
/* Callbacks for notification. Called in open, close and hangup */
int (*notifier_add)(struct hvc_struct *hp, int irq);
void (*notifier_del)(struct hvc_struct *hp, int irq);
void (*notifier_hangup)(struct hvc_struct *hp, int irq);
};
/* Register a vterm and a slot index for use as a console (console_init) */
......@@ -77,15 +81,19 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
extern int __devexit hvc_remove(struct hvc_struct *hp);
extern int hvc_remove(struct hvc_struct *hp);
/* data available */
int hvc_poll(struct hvc_struct *hp);
void hvc_kick(void);
/* Resize hvc tty terminal window */
extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
/* default notifier for irq based notification */
extern int notifier_add_irq(struct hvc_struct *hp, int data);
extern void notifier_del_irq(struct hvc_struct *hp, int data);
extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
......
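A backend that learns of a new terminal geometry would hand it to the core through the hvc_resize() helper declared above; a minimal sketch, assuming the backend includes hvc_console.h (the wrapper name and the rows/cols parameters are invented for illustration):

	/* Hedged sketch: a backend forwarding a size change to the hvc core.
	 * Only hvc_resize() and struct winsize come from the header above;
	 * the wrapper itself is hypothetical.
	 */
	static void example_backend_resize(struct hvc_struct *hp, int rows, int cols)
	{
		struct winsize ws = {
			.ws_row = rows,
			.ws_col = cols,
		};

		hvc_resize(hp, ws);	/* the core records hp->ws and schedules hp->tty_resize */
	}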
(remaining file diffs collapsed in this view)