Commit 7cd2541c authored by Ingo Molnar

Merge commit 'v2.6.36-rc7' into perf/core

Conflicts:
	arch/x86/kernel/module.c

Merge reason: Resolve the conflict, pick up fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
......@@ -3554,12 +3554,12 @@ E: cvance@nai.com
D: portions of the Linux Security Module (LSM) framework and security modules
N: Petr Vandrovec
E: vandrove@vc.cvut.cz
E: petr@vandrovec.name
D: Small contributions to ncpfs
D: Matrox framebuffer driver
S: Chudenicka 8
S: 10200 Prague 10, Hostivar
S: Czech Republic
S: 21513 Conradia Ct
S: Cupertino, CA 95014
S: USA
N: Thibaut Varene
E: T-Bone@parisc-linux.org
......
......@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c6410/
ARM/S5P ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5p*/
ARM/SHMOBILE ARM ARCHITECTURE
M: Paul Mundt <lethal@linux-sh.org>
M: Magnus Damm <magnus.damm@gmail.com>
......@@ -1220,7 +1227,7 @@ F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
AVR32 ARCHITECTURE
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
......@@ -1228,7 +1235,7 @@ S: Supported
F: arch/avr32/
AVR32/AT32AP MACHINE SUPPORT
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
S: Supported
F: arch/avr32/mach-at32ap/
......@@ -2199,6 +2206,12 @@ W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
EFIFB FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
M: Peter Jones <pjones@redhat.com>
S: Maintained
F: drivers/video/efifb.c
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
......@@ -2662,6 +2675,8 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
F: drivers/hwmon/
......@@ -3773,9 +3788,8 @@ W: http://www.syskonnect.com
S: Supported
MATROX FRAMEBUFFER DRIVER
M: Petr Vandrovec <vandrove@vc.cvut.cz>
L: linux-fbdev@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
......@@ -3899,10 +3913,8 @@ F: Documentation/serial/moxa-smartio
F: drivers/char/mxser.*
MSI LAPTOP SUPPORT
M: Lennart Poettering <mzxreary@0pointer.de>
M: Lee, Chun-Yi <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
W: https://tango.0pointer.de/mailman/listinfo/s270-linux
W: http://0pointer.de/lennart/tchibo.html
S: Maintained
F: drivers/platform/x86/msi-laptop.c
......@@ -3919,8 +3931,10 @@ S: Supported
F: drivers/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
S: Orphan
M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
S: Maintained
F: drivers/mmc/
F: include/linux/mmc/
......@@ -3962,8 +3976,8 @@ S: Maintained
F: drivers/net/natsemi.c
NCP FILESYSTEM
M: Petr Vandrovec <vandrove@vc.cvut.cz>
S: Maintained
M: Petr Vandrovec <petr@vandrovec.name>
S: Odd Fixes
F: fs/ncpfs/
NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
......@@ -5091,8 +5105,10 @@ S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
S: Orphan
M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
S: Maintained
F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Sheep on Meth
# *DOCUMENTATION*
......
......@@ -73,8 +73,6 @@
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
ldq $0, 0($sp); \
ldq $1, 8($sp); \
99:; \
ldq $19, 72($sp); \
ldq $20, 80($sp); \
......@@ -316,7 +314,7 @@ ret_from_sys_call:
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
beq $0, restore_all
beq $0, ret_to_kernel
ret_to_user:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
......@@ -329,6 +327,11 @@ restore_all:
RESTORE_ALL
call_pal PAL_rti
ret_to_kernel:
lda $16, 7
call_pal PAL_swpipl
br restore_all
.align 3
$syscall_error:
/*
......@@ -657,7 +660,7 @@ kernel_thread:
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($sp) /* $0 */
br restore_all
br ret_to_kernel
.end kernel_thread
/*
......@@ -911,15 +914,6 @@ sys_execve:
jmp $31, do_sys_execve
.end sys_execve
.align 4
.globl osf_sigprocmask
.ent osf_sigprocmask
osf_sigprocmask:
.prologue 0
mov $sp, $18
jmp $31, sys_osf_sigprocmask
.end osf_sigprocmask
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
......
......@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
dest[30] = rdusp();
dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid
......
......@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
*
* how:
* 1 - SIG_BLOCK
* 2 - SIG_UNBLOCK
* 3 - SIG_SETMASK
*
* We change the range to -1 .. 1 in order to let gcc easily
* use the conditional move instructions.
*
* Note that we don't need to acquire the kernel lock for SMP
* operation, as all of this is local to this thread.
*/
SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
struct pt_regs *, regs)
SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
{
unsigned long oldmask = -EINVAL;
if ((unsigned long)how-1 <= 2) {
long sign = how-2; /* -1 .. 1 */
unsigned long block, unblock;
newmask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
oldmask = current->blocked.sig[0];
unblock = oldmask & ~newmask;
block = oldmask | newmask;
if (!sign)
block = unblock;
if (sign <= 0)
newmask = block;
if (_NSIG_WORDS > 1 && sign > 0)
sigemptyset(&current->blocked);
current->blocked.sig[0] = newmask;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->r0 = 0; /* special no error return */
sigset_t oldmask;
sigset_t mask;
unsigned long res;
siginitset(&mask, newmask & _BLOCKABLE);
res = sigprocmask(how, &mask, &oldmask);
if (!res) {
force_successful_syscall_return();
res = oldmask.sig[0];
}
return oldmask;
return res;
}
SYSCALL_DEFINE3(osf_sigaction, int, sig,
......@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags))
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
......@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags))
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
......
......@@ -58,7 +58,7 @@ sys_call_table:
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
.quad osf_sigprocmask
.quad sys_osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct
......
......@@ -271,7 +271,6 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
......@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
config ARM_ERRATA_742230
bool "ARM errata: DMB operation may be faulty"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 742230 Cortex-A9
(r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
between two write operations may not ensure the correct visibility
ordering of the two writes. This workaround sets a specific bit in
the diagnostic register of the Cortex-A9 which causes the DMB
instruction to behave as a DSB, ensuring the correct behaviour of
the two writes.
config ARM_ERRATA_742231
bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 742231 Cortex-A9
(r2p0..r2p2) erratum. Under certain conditions, specific to the
Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
accessing some data located in the same cache line, may get corrupted
data due to bad handling of the address hazard when the line gets
replaced from one of the CPUs at the same time as another CPU is
accessing it. This workaround sets specific bits in the diagnostic
register of the Cortex-A9 which reduces the linefill issuing
capabilities of the processor.
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4
......
......@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
@sed "$(SEDFLAGS)" < $< > $@
......@@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= PHYS_OFFSET + SZ_64M - 1)
return 0;
return -EIO;
}
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
......
......@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
......
......@@ -48,6 +48,8 @@ work_pending:
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
tst r1, #_TIF_SIGPENDING @ delivering a signal?
movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again
......
......@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA21,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi0_device = {
......@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PB11,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi1_device = {
......
......@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};
......
......@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};
......
......@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};
......
......@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};
......
......@@ -13,8 +13,8 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
DOVE_PCIE0_IO_VIRT_BASE))
#define __mem_pci(a) (a)
#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
DOVE_PCIE0_IO_VIRT_BASE))
#define __mem_pci(a) (a)
#endif
......@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= SZ_64M - 1)
return 0;
return -EIO;
}
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
......@@ -26,6 +26,8 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
#define ARCH_HAS_DMA_SET_COHERENT_MASK
#define pcibios_assign_all_busses() 1
/* Register locations and bits */
......
......@@ -38,7 +38,7 @@
#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
......
......@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 0 I/O Space";
pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
......@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 1 I/O Space";
pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
......
......@@ -9,6 +9,8 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
#include <mach/cputype.h>
static inline void arch_idle(void)
{
cpu_do_idle();
......@@ -16,6 +18,9 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
cpu_reset(0);
if (cpu_is_pxa168())
cpu_reset(0xffff0000);
else
cpu_reset(0);
}
#endif /* __ASM_MACH_SYSTEM_H */
......@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
freqs.cpu = policy->cpu;
if (freq_debug)
pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
"(SDRAM %d Mhz)\n",
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
......
......@@ -264,23 +264,35 @@
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
* == 0x3 for pxa300/pxa310/pxa320
*/
#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
#define __cpu_is_pxa2xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id <= 0x2; \
})
#else
#define __cpu_is_pxa2xx(id) (0)
#endif
#ifdef CONFIG_PXA3xx
#define __cpu_is_pxa3xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id == 0x3; \
})
#else
#define __cpu_is_pxa3xx(id) (0)
#endif
#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
#define __cpu_is_pxa93x(id) \
({ \
unsigned int _id = (id) >> 4 & 0xfff; \
_id == 0x683 || _id == 0x693; \
})
#else
#define __cpu_is_pxa93x(id) (0)
#endif
#define cpu_is_pxa2xx() \
({ \
......@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
#define pcibios_assign_all_busses() 1
#define ARCH_HAS_DMA_SET_COHERENT_MASK
#endif
#endif /* _ASM_ARCH_HARDWARE_H */
......@@ -6,6 +6,8 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
#include <mach/hardware.h>
#define IO_SPACE_LIMIT 0xffffffff
/*
......
......@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
},
};
static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
.use_pio = 1,
};
void __init palm27x_pmic_init(void)
{
i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
pxa27x_set_i2c_power_info(NULL);
pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
}
#endif
......@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data vpac270_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_power = -1,
.gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
.gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
.detect_delay_ms = 200,
......
......@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
extern int gpio_get_value(unsigned gpio);
extern void gpio_set_value(unsigned gpio, int value);
#define gpio_get_value_cansleep gpio_get_value
#define gpio_set_value_cansleep gpio_set_value
/* wrappers to sleep-enable the previous two functions */
static inline unsigned gpio_to_irq(unsigned gpio)
{
......
......@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
int i;
#ifdef CONFIG_CACHE_L2X0
l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
/* set RAM latencies to 1 cycle for this core tile. */
writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
......
......@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else
set_cr(cr_no_alignment);
else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
* reaching user space, then the IRQ vector entry code will
* notice that we were still in kernel space and therefore
* the alignment trap won't be re-enabled in that case as it
* is presumed to be always on from kernel space.
* Let's prevent that race by disabling interrupts here (they
* are disabled on the way back to user space anyway in
* entry-common.S) and disable the alignment trap only if
* there is no work pending for this thread.
*/
raw_local_irq_disable();
if (!(current_thread_info()->flags & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
return 0;
}
......
......@@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/sections.h>
......@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
......@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
......@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
if (arch_is_coherent() && cpu_is_xsc3())
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
/*
* ARMv6 and above have extended page tables.
*/
......@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
#endif
}
......@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
......@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
if (!pfn_valid(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
......
......@@ -186,13 +186,14 @@ cpu_v7_name:
* It is assumed that:
* - cache type register is implemented
*/
__v7_setup:
__v7_ca9mp_setup:
#ifdef CONFIG_SMP
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
......@@ -201,11 +202,16 @@ __v7_setup:
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
bne 2f
bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
orr r0, r6, r5, lsr #20-4 @ combine variant and revision
orr r6, r6, r5, lsr #20-4 @ combine variant and revision
ubfx r0, r0, #4, #12 @ primary part number
/* Cortex-A8 Errata */
ldr r10, =0x00000c08 @ Cortex-A8 primary part number
teq r0, r10
bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
......@@ -213,21 +219,42 @@ __v7_setup:
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
teq r0, #0x20 @ only present in r2p0
teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
teq r0, #0x20 @ only present in r2p0
teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
b 3f
/* Cortex-A9 Errata */
2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
bne 3f
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 4 @ set bit #4
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 12 @ set bit #12
orreq r10, r10, #1 << 22 @ set bit #22
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
2: mov r10, #0
3: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
......@@ -323,6 +350,29 @@ cpu_elf_name:
.section ".proc.info.init", #alloc, #execinstr
.type __v7_ca9mp_proc_info, #object
__v7_ca9mp_proc_info:
.long 0x410fc090 @ Required ID value
.long 0xff0ffff0 @ Mask for ID
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __v7_ca9mp_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long cpu_v7_name
.long v7_processor_functions
.long v7wbi_tlb_fns
.long v6_user_fns
.long v7_cache_fns
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
/*
* Match any ARMv7 processor core.
*/
......
/*
* linux/arch/arm/mach-nomadik/timer.c
* linux/arch/arm/plat-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
......@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
cr = readl(mtu_base + MTU_CR(1));
writel(0, mtu_base + MTU_LR(1));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
writel(0x2, mtu_base + MTU_IMSC);
writel(1 << 1, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
......@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
{
unsigned long rate;
struct clk *clk0;
struct clk *clk1;
u32 cr;
u32 cr = MTU_CRn_32BITS;
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
clk1 = clk_get_sys("mtu1", NULL);
BUG_ON(IS_ERR(clk1));
clk_enable(clk0);
clk_enable(clk1);
/*
* Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
* use a divide-by-16 counter if it's more than 16MHz
* Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
* for ux500.
* Use a divide-by-16 counter if the tick rate is more than 32MHz.
* At 32 MHz, the timer (with 32 bit counter) can be programmed
* to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
* with 16 gives too low timer resolution.
*/
cr = MTU_CRn_32BITS;;
rate = clk_get_rate(clk0);
if (rate > 16 << 20) {
if (rate > 32000000) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
......@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
/* Timer 1 is used for events, fix according to rate */
cr = MTU_CRn_32BITS;
rate = clk_get_rate(clk1);
if (rate > 16 << 20) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
cr |= MTU_CRn_PRESCALE_1;
}
/* Timer 1 is used for events */
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
......
......@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
config OMAP_DEBUG_LEDS
bool
depends on OMAP_DEBUG_DEVICES
default y if LEDS
default y if LEDS_CLASS
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
......
......@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
} else {
complete(&mcbsp_rx->tx_irq_completion);
complete(&mcbsp_rx->rx_irq_completion);
}
return IRQ_HANDLED;
......
......@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
if (cpu_is_omap24xx()) {
omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
base = OMAP2_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
}
if (cpu_is_omap34xx()) {
omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
base = OMAP3_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
/*
* SRAM must be marked as non-cached on OMAP3 since the
* CORE DPLL M2 divider change code (in SRAM) runs with the
......@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
}
if (cpu_is_omap44xx()) {
omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
base = OMAP4_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
}
omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
omap_sram_io_desc[0].virtual = omap_sram_base;
base = omap_sram_start;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
......
......@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
return module_bug_finalize(hdr, sechdrs, module);
return 0;
}
void module_arch_cleanup(struct module *module)
{
module_bug_cleanup(module);
}
......@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}
......@@ -157,7 +157,6 @@ typedef struct sigaltstack {
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
......
......@@ -351,6 +351,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __IGNORE_lchown
#define __IGNORE_setuid
......
......@@ -235,10 +235,9 @@ work_resched:
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
ldi r1, #0 ; arg2 : sigset_t *oldset
mv r2, r9 ; arg3 : __u32 thread_info_flags
mv r1, r9 ; arg2 : __u32 thread_info_flags
bl do_notify_resume
bra restore_all
bra resume_userspace
; perform syscall exit tracing
ALIGN
......
......@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
break;
return -EIO;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
break;
return -EIO;
if (embed_debug_trap(child, next_pc))
break;
return -EIO;
invalidate_cache();
return 0;
}
void user_disable_single_step(struct task_struct *child)
......
......@@ -28,37 +28,6 @@
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
int do_signal(struct pt_regs *, sigset_t *);
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
unsigned long r2, unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
sigset_t newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
......@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
......@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc);
#endif
return;
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
static int prev_insn(struct pt_regs *regs)
{
u16 inst;
if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
return -EFAULT;
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
regs->syscall_nr = -1;
return 0;
}
/*
* OK, we're invoking a handler
*/
static void
static int
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
unsigned short inst;
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
......@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
if (prev_insn(regs) < 0)
return -EFAULT;
}
}
/* Set up the stack frame */
setup_rt_frame(sig, ka, info, oldset, regs);
if (setup_rt_frame(sig, ka, info, oldset, regs))
return -EFAULT;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
......@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
/*
......@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
int do_signal(struct pt_regs *regs, sigset_t *oldset)
static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
unsigned short inst;
sigset_t *oldset;
/*
* We want the common case to go fast, which
......@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* if so.
*/
if (!user_mode(regs))
return 1;
return;
if (try_to_freeze())
goto no_signal;
if (!oldset)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
......@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
*/
/* Whee! Actually deliver the signal. */
handle_signal(signr, &ka, &info, oldset, regs);
return 1;
if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
clear_thread_flag(TIF_RESTORE_SIGMASK);
return;
}
no_signal:
......@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
}
if (regs->r0 == -ERESTART_RESTARTBLOCK){
prev_insn(regs);
} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
prev_insn(regs);
}
}
return 0;
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
__u32 thread_info_flags)
void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
......@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset);
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
......
......@@ -162,7 +162,7 @@ static void mac_init_asc( void )
void mac_mksound( unsigned int freq, unsigned int length )
{
__u32 cfreq = ( freq << 5 ) / 468;
__u32 flags;
unsigned long flags;
int i;
if ( mac_special_bell == NULL )
......@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
*/
static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
{
__u32 flags;
unsigned long flags;
/* if the bell is already ringing, ring longer */
if ( mac_bell_duration > 0 )
......@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
static void mac_quadra_ring_bell( unsigned long ignored )
{
int i, count = mac_asc_samplespersec / HZ;
__u32 flags;
unsigned long flags;
/*
* we neither want a sound buffer overflow nor underflow, so we need to match
......
......@@ -13,6 +13,7 @@ config MIPS
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
mainmenu "Linux/MIPS Kernel Configuration"
......@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
select SYS_SUPPORTS_SMP
select SMP_UP
help
This is a kernel model which is also known a VSMP or lately
has been marketesed into SMVP.
This is a kernel model which is known a VSMP but lately has been
marketesed into SMVP.
Virtual SMP uses the processor's VPEs to implement virtual
processors. In currently available configuration of the 34K processor
this allows for a dual processor. Both processors will share the same
primary caches; each will obtain the half of the TLB for it's own
exclusive use. For a layman this model can be described as similar to
what Intel calls Hyperthreading.
For further information see http://www.linux-mips.org/wiki/34K#VSMP
config MIPS_MT_SMTC
bool "SMTC: Use all TCs on all VPEs for SMP"
......@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
help
This is a kernel model which is known a SMTC or lately has been
marketesed into SMVP.
is presenting the available TC's of the core as processors to Linux.
On currently available 34K processors this means a Linux system will
see up to 5 processors. The implementation of the SMTC kernel differs
significantly from VSMP and cannot efficiently coexist in the same
kernel binary so the choice between VSMP and SMTC is a compile time
decision.
For further information see http://www.linux-mips.org/wiki/34K#SMTC
endchoice
......
......@@ -43,7 +43,7 @@ int prom_argc;
char **prom_argv;
char **prom_envp;
void prom_init_cmdline(void)
void __init prom_init_cmdline(void)
{
int i;
......@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
}
}
int prom_get_ethernet_addr(char *ethernet_addr)
int __init prom_get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
......@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
return 0;
}
EXPORT_SYMBOL(prom_get_ethernet_addr);
void __init prom_free_prom_memory(void)
{
......
......@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
hostprogs-y := calc_vmlinuz_load_addr
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
vmlinuzobjs-y += $(obj)/piggy.o
......
......@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_STATIC
depends on CPU_CAVIUM_OCTEON
config CAVIUM_OCTEON_HELPER
def_bool y
depends on OCTEON_ETHERNET || PCI
......@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK; /* Let default notifier send signals */
}
static int cnmips_cu2_setup(void)
static int __init cnmips_cu2_setup(void)
{
return cu2_notifier(cnmips_cu2_call, 0);
}
......
......@@ -11,4 +11,4 @@
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
......@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
#else /* !CONFIG_64BIT */
#include <asm-generic/atomic64.h>
#endif /* CONFIG_64BIT */
/*
......
......@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
#define cu2_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb __cpuinitdata = { \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
......
......@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
*/
struct gic_intr_map {
unsigned int cpunum; /* Directed to this CPU */
#define GIC_UNUSED 0xdead /* Dummy data */
unsigned int pin; /* Directed to this Pin */
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
......
#ifndef __ASM_MACH_TX49XX_KMALLOC_H
#define __ASM_MACH_TX49XX_KMALLOC_H
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
......@@ -88,9 +88,6 @@
#define GIC_EXT_INTR(x) x
/* Dummy data */
#define X 0xdead
/* External Interrupts used for IPI */
#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
......
......@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
/*
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
* (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
* discussion can be found in lkml posting
* <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
* archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
*
* It is unclear if the misscompilations mentioned in
* http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
* until GCC 3.x has been retired before we can apply
* https://patchwork.linux-mips.org/patch/1541/
*/
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
......
......@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP)
#define _TIF_WORK_MASK (0x0000ffef & \
~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
......
......@@ -356,16 +356,19 @@
#define __NR_perf_event_open (__NR_Linux + 333)
#define __NR_accept4 (__NR_Linux + 334)
#define __NR_recvmmsg (__NR_Linux + 335)
#define __NR_fanotify_init (__NR_Linux + 336)
#define __NR_fanotify_mark (__NR_Linux + 337)
#define __NR_prlimit64 (__NR_Linux + 338)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 335
#define __NR_Linux_syscalls 338
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 335
#define __NR_O32_Linux_syscalls 338
#if _MIPS_SIM == _MIPS_SIM_ABI64
......@@ -668,16 +671,19 @@
#define __NR_perf_event_open (__NR_Linux + 292)
#define __NR_accept4 (__NR_Linux + 293)
#define __NR_recvmmsg (__NR_Linux + 294)
#define __NR_fanotify_init (__NR_Linux + 295)
#define __NR_fanotify_mark (__NR_Linux + 296)
#define __NR_prlimit64 (__NR_Linux + 297)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 294
#define __NR_Linux_syscalls 297
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 294
#define __NR_64_Linux_syscalls 297
#if _MIPS_SIM == _MIPS_SIM_NABI32
......@@ -985,16 +991,19 @@
#define __NR_accept4 (__NR_Linux + 297)
#define __NR_recvmmsg (__NR_Linux + 298)
#define __NR_getdents64 (__NR_Linux + 299)
#define __NR_fanotify_init (__NR_Linux + 300)
#define __NR_fanotify_mark (__NR_Linux + 301)
#define __NR_prlimit64 (__NR_Linux + 302)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 299
#define __NR_Linux_syscalls 302
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 299
#define __NR_N32_Linux_syscalls 302
#ifdef __KERNEL__
......
......@@ -7,7 +7,6 @@
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
......@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
int i;
irq -= _irqbase;
pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
pr_debug("%s(%d) called\n", __func__, irq);
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
......@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
if (cpu == X)
if (cpu == GIC_UNUSED)
continue;
if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
continue;
......
......@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
/* Userpace events, ignore. */
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
......
......@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0, 0)) == 0)
ret.retval = tv.tv_sec;
ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:
......
......@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
{
return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
}
SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
u64, a3, u64, a4, int, dfd, const char __user *, pathname)
{
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
}
......@@ -583,7 +583,10 @@ einval: li v0, -ENOSYS
sys sys_rt_tgsigqueueinfo 4
sys sys_perf_event_open 5
sys sys_accept4 4
sys sys_recvmmsg 5
sys sys_recvmmsg 5 /* 4335 */
sys sys_fanotify_init 2
sys sys_fanotify_mark 6
sys sys_prlimit64 4
.endm
/* We pre-compute the number of _instruction_ bytes needed to
......
......@@ -416,9 +416,12 @@ sys_call_table:
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev /* 5390 */
PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR sys_recvmmsg
PTR sys_recvmmsg
PTR sys_fanotify_init /* 5295 */
PTR sys_fanotify_mark
PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
......@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
PTR sys_getdents
PTR sys_getdents64
PTR sys_fanotify_init /* 6300 */
PTR sys_fanotify_mark
PTR sys_prlimit64
.size sysn32_call_table,.-sysn32_call_table
......@@ -538,5 +538,8 @@ sys_call_table:
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
PTR compat_sys_recvmmsg /* 4335 */
PTR sys_fanotify_init
PTR sys_32_fanotify_mark
PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
......@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ISA
if (dev == NULL)
gfp |= __GFP_DMA;
else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
gfp |= __GFP_DMA;
dma_flag = __GFP_DMA;
else
#endif
#ifdef CONFIG_ZONE_DMA32
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
dma_flag = __GFP_DMA;
else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA;
else
#endif
;
dma_flag = 0;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
return gfp;
return gfp | dma_flag;
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
......
......@@ -30,7 +30,7 @@
#define tc_lsize 32
extern unsigned long icache_way_size, dcache_way_size;
unsigned long tcache_size;
static unsigned long tcache_size;
#include <asm/r4kcache.h>
......
......@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
*/
#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
#define X GIC_UNUSED
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
{ X, X, X, X, 0 },
......@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
#undef X
/*
* GCMP needs to be detected before any SMP initialisation
......
......@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
if (!((pcicvalue == PCIM_H_EA) ||
(pcicvalue == PCIM_H_IA_FIX) ||
(pcicvalue == PCIM_H_IA_RR))) {
pr_err(KERN_ERR "PCI init error!!!\n");
pr_err("PCI init error!!!\n");
/* Not in Host Mode, return ERROR */
return -1;
}
......
......@@ -22,29 +22,19 @@
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <glb.h>
void pnx8550_machine_restart(char *command)
{
char head[] = "************* Machine restart *************";
char foot[] = "*******************************************";
printk("\n\n");
printk("%s\n", head);
if (command != NULL)
printk("* %s\n", command);
printk("%s\n", foot);
PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
}
void pnx8550_machine_halt(void)
{
printk("*** Machine halt. (Not implemented) ***\n");
}
void pnx8550_machine_power_off(void)
{
printk("*** Machine power off. (Not implemented) ***\n");
while (1) {
if (cpu_wait)
cpu_wait();
}
}
......@@ -44,7 +44,6 @@
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
......@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
_machine_restart = pnx8550_machine_restart;
_machine_halt = pnx8550_machine_halt;
pm_power_off = pnx8550_machine_power_off;
pm_power_off = pnx8550_machine_halt;
/* Clear the Global 2 Register, PCI Inta Output Enable Registers
Bit 1:Enable DAC Powerdown
......
......@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
config MN10300
def_bool y
select HAVE_OPROFILE
select HAVE_ARCH_TRACEHOOK
config AM33
def_bool y
......
......@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
choice
prompt "GDB stub port"
default GDBSTUB_TTYSM0
default GDBSTUB_ON_TTYSM0
depends on GDBSTUB
help
Select the serial port used for GDB-stub.
......
......@@ -229,9 +229,9 @@ int ffs(int x)
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr) ^ 0x18, (addr))
test_and_set_bit((nr), (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr) ^ 0x18, (addr))
test_and_clear_bit((nr), (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>
......
......@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
......
......@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
/*
......@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}
......@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
......@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
......@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
{
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
if (is_using_fpu(current))
fpu_kill_state(current);
......@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
......@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
return 0;
give_sigsegv:
force_sig(SIGSEGV, current);
force_sigsegv(sig, current);
return -EFAULT;
}
......@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->d0 = sig;
regs->d1 = (long) &frame->info;
set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
......@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return 0;
give_sigsegv:
force_sig(SIGSEGV, current);
force_sigsegv(sig, current);
return -EFAULT;
}
static inline void stepback(struct pt_regs *regs)
{
regs->pc -= 2;
regs->orig_d0 = -1;
}
/*
* handle the actual delivery of a signal to userspace
*/
......@@ -459,7 +464,7 @@ static int handle_signal(int sig,
/* fallthrough */
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
regs->pc -= 2;
stepback(regs);
}
}
......@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
regs->pc -= 2;
stepback(regs);
break;
case -ERESTART_RESTARTBLOCK:
regs->d0 = __NR_restart_syscall;
regs->pc -= 2;
stepback(regs);
break;
}
}
......
......@@ -2,13 +2,11 @@
# Makefile for the MN10300-specific memory management code
#
cacheflush-y := cache.o cache-mn10300.o
cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o
ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
obj-y += cache.o cache-mn10300.o
ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
obj-y += cache-flush-mn10300.o
endif
endif
misalignment.o dma-alloc.o $(cacheflush-y)
/* Handle the cache being disabled
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
if (end < start)
return -EINVAL;
return 0;
}
......@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
unsigned long addr, size, off;
unsigned long addr, size, base, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
if (end > 0x80000000UL) {
/* addresses above 0xa0000000 do not go through the cache */
if (end > 0xa0000000UL) {
end = 0xa0000000UL;
if (start >= end)
return;
}
/* kernel addresses between 0x80000000 and 0x9fffffff do not
* require page tables, so we just map such addresses directly */
base = (start >= 0x80000000UL) ? start : 0x80000000UL;
mn10300_dcache_flush_range(base, end);
if (base == start)
goto invalidate;
end = base;
}
for (; start < end; start += size) {
/* work out how much of the page to flush */
off = start & (PAGE_SIZE - 1);
......@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
invalidate:
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);
......
......@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
module_bug_cleanup(mod);
}
......@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
int err;
err = module_bug_finalize(hdr, sechdrs, me);
if (err)
return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
......@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}
......@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
int id_match = 0;
if (dev == NULL || id == NULL)
return NULL;
return clk;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
......
......@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %s\n", pcictrl->full_name);
return;
goto out_put;
}
if (bus_range[1] == bus_range[0])
......@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
printk(" controlled by %s\n", pcictrl->full_name);
printk("\n");
hose = pcibios_alloc_controller(of_node_get(pcictrl));
hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %s\n",
pcictrl->full_name);
return;
goto out_put;
}
hose->first_busno = bus_range[0];
......@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
return;
out_put:
of_node_put(pcictrl);
}
#else
......
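In efika_pcisetup() above, both early-exit paths now branch to out_put so the reference held on the pcictrl device node is dropped, and the pcibios_alloc_controller() call stops taking an extra of_node_get(), handing over the reference the function already holds. A minimal sketch of that acquire/hand-over/goto-cleanup shape, using hypothetical node_get()/node_put()/alloc_controller() stand-ins rather than the kernel's OF and PCI helpers:

/* Sketch of the reference-handling shape only; the helpers below are
 * hypothetical stand-ins, not the kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct node { int refs; };
struct controller { struct node *np; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n) { n->refs--; }

static struct controller *alloc_controller(struct node *n)
{
	struct controller *c = calloc(1, sizeof(*c));

	if (c)
		c->np = n;	/* controller now owns the reference */
	return c;
}

static void pcisetup_sketch(struct node *candidate, int have_bus_range)
{
	struct node *pcictrl = node_get(candidate);
	struct controller *hose;

	if (!have_bus_range) {
		fprintf(stderr, "can't get bus-range\n");
		goto out_put;		/* was: bare return, leaking the ref */
	}

	hose = alloc_controller(pcictrl);
	if (!hose) {
		fprintf(stderr, "can't allocate controller\n");
		goto out_put;
	}

	return;				/* success: reference kept by hose */

out_put:
	node_put(pcictrl);		/* error paths drop the reference */
}

int main(void)
{
	struct node n = { .refs = 1 };

	pcisetup_sketch(&n, 0);
	printf("refs after failed setup: %d\n", n.refs);	/* back to 1 */
	return 0;
}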
......@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
/* wait at lease 1 us */
udelay(2);
/* wait for 1 us */
udelay(1);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
/* wait at least 200ns */
/* 7 ~= (200ns * timebase) / ns2sec */
__delay(7);
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);
......
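The "7 ~= (200ns * timebase) / ns2sec" comment above converts the 200 ns post-reset hold time into timebase ticks for __delay(). A worked version of that arithmetic, assuming a 33 MHz timebase purely for illustration (the real frequency comes from the platform, not from this hunk):

/* Worked version of the tick calculation in the comment above.
 * The 33 MHz timebase is an assumption for illustration only. */
#include <stdio.h>

int main(void)
{
	const unsigned long long timebase_hz = 33000000ULL;	/* assumed */
	const unsigned long long delay_ns = 200;
	const unsigned long long ns_per_sec = 1000000000ULL;

	/* round up so the hold time is never undershot */
	unsigned long long ticks =
		(delay_ns * timebase_hz + ns_per_sec - 1) / ns_per_sec;

	printf("__delay(%llu)\n", ticks);	/* prints 7 for 33 MHz */
	return 0;
}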
......@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
{
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return module_bug_finalize(hdr, sechdrs, me);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
}
......@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
int ret = 0;
ret |= module_dwarf_finalize(hdr, sechdrs, me);
ret |= module_bug_finalize(hdr, sechdrs, me);
return ret;
}
void module_arch_cleanup(struct module *mod)
{
module_bug_cleanup(mod);
module_dwarf_cleanup(mod);
}
......@@ -1506,13 +1506,6 @@ handle_ill:
}
STD_ENDPROC(handle_ill)
.pushsection .rodata, "a"
.align 8
bpt_code:
bpt
ENDPROC(bpt_code)
.popsection
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
......
......@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
static int uml_net_set_mac(struct net_device *dev, void *addr)
{
struct uml_net_private *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
spin_lock_irq(&lp->lock);
eth_mac_addr(dev, hwaddr->sa_data);
spin_unlock_irq(&lp->lock);
return 0;
}
static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
dev->mtu = new_mtu;
......@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = {
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_multicast_list = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = uml_net_set_mac,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = uml_net_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
......@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac,
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
eth_mac_addr(dev, device->mac);
/* don't use eth_mac_addr, it will not work here */
memcpy(dev->dev_addr, device->mac, ETH_ALEN);
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
......
......@@ -62,7 +62,7 @@ static long execve1(const char *file,
return error;
}
long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
{
long err;
......@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
return err;
}
long sys_execve(const char __user *file, char __user *__user *argv,
char __user *__user *env)
long sys_execve(const char __user *file, const char __user *const __user *argv,
const char __user *const __user *env)
{
long error;
char *filename;
......
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
......@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
fs = get_fs();
set_fs(KERNEL_DS);
ret = um_execve(filename, (char __user *__user *)argv,
(char __user *__user *) envp);
ret = um_execve(filename, (const char __user *const __user *)argv,
(const char __user *const __user *) envp);
set_fs(fs);
return ret;
......
......@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
if (arg[pos] == ',')
pos++;
if (!strncmp(arg, "ttyS", 4)) {
/*
* make sure we have
* "serial,0x3f8,115200"
* "serial,ttyS0,115200"
* "ttyS0,115200"
*/
if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
port = simple_strtoull(arg + pos, &e, 16);
if (port == 0 || arg + pos == e)
port = DEFAULT_SERIAL_PORT;
else
pos = e - arg;
} else if (!strncmp(arg + pos, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
......
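The added branch lets the early console option name the UART either by raw I/O port ("serial,0x3f8,115200") or by ttySn name ("serial,ttyS0,115200" / "ttyS0,115200"); the pos == 7 check together with the "0x" prefix selects the hex-port parser. A userspace sketch of that dispatch, with strtoul standing in for the boot code's simple_strtoull and a placeholder DEFAULT_SERIAL_PORT:

/* Userspace sketch of the option parsing above; strtoul stands in for
 * simple_strtoull and DEFAULT_SERIAL_PORT is a placeholder value. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8	/* assumption for the sketch */

static unsigned long parse_earlyprintk_port(const char *arg)
{
	static const unsigned long bases[] = { 0x3f8, 0x2f8 };
	unsigned long port = DEFAULT_SERIAL_PORT;
	int pos = 0;

	/* accepted forms: "serial,0x3f8,115200", "serial,ttyS0,115200",
	 * "ttyS0,115200" */
	if (!strncmp(arg, "serial", 6))
		pos = 6;
	if (arg[pos] == ',')
		pos++;

	if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
		char *e;

		port = strtoul(arg + pos, &e, 16);
		if (port == 0 || arg + pos == e)
			port = DEFAULT_SERIAL_PORT;
	} else if (!strncmp(arg + pos, "ttyS", 4)) {
		int idx = 0;

		if (arg[pos + 4] == '1')	/* only ttyS0/ttyS1 modelled */
			idx = 1;
		port = bases[idx];
	}

	return port;
}

int main(void)
{
	printf("%#lx\n", parse_earlyprintk_port("serial,0x2f8,115200"));
	printf("%#lx\n", parse_earlyprintk_port("ttyS1,115200"));
	return 0;
}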
......@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
#endif /* !CONFIG_AMD_IOMMU_STATS */
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
......@@ -368,6 +368,9 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
/* flags read from acpi table */
u8 acpi_flags;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
......@@ -411,6 +414,15 @@ struct amd_iommu {
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
/*
* This array is required to work around a potential BIOS bug.
* The BIOS may miss to restore parts of the PCI configuration
* space when the system resumes from S3. The result is that the
* IOMMU does not execute commands anymore which leads to system
* failure.
*/
u32 cache_cfg[4];
};
/*
......
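The new cache_cfg[4] field exists, per the comment above, to work around BIOSes that fail to restore parts of the IOMMU's PCI configuration space across S3: the values are captured once and written back on resume. A sketch of that save/restore idea only, with stubbed read/write helpers and made-up register offsets (the real offsets are not part of this hunk):

/* Sketch of the S3 workaround enabled by cache_cfg[]: snapshot a few
 * config-space registers at probe time and write them back on resume.
 * read_cfg()/write_cfg() and the offsets are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define NR_SAVED_REGS 4

struct iommu_state {
	uint32_t cache_cfg[NR_SAVED_REGS];	/* saved PCI config dwords */
};

/* hypothetical offsets, for illustration only */
static const int cfg_offsets[NR_SAVED_REGS] = { 0x30, 0x34, 0x38, 0x3c };

static uint32_t read_cfg(int off)
{
	return 0x1000u + (uint32_t)off;		/* stub */
}

static void write_cfg(int off, uint32_t val)
{
	printf("restore cfg[%#x] = %#x\n", (unsigned)off, (unsigned)val);
}

static void iommu_save_config(struct iommu_state *s)
{
	for (int i = 0; i < NR_SAVED_REGS; i++)
		s->cache_cfg[i] = read_cfg(cfg_offsets[i]);
}

static void iommu_resume_restore(const struct iommu_state *s)
{
	/* the BIOS may have skipped these registers across S3 */
	for (int i = 0; i < NR_SAVED_REGS; i++)
		write_cfg(cfg_offsets[i], s->cache_cfg[i]);
}

int main(void)
{
	struct iommu_state s;

	iommu_save_config(&s);
	iommu_resume_restore(&s);
	return 0;
}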
......@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
(addr[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
......
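The one-line change above drops the cast to a plain (unsigned long *), which had silently discarded the volatile qualifier on addr and so allowed the compiler to assume the bitmap word could not change underneath it. A simplified userspace sketch of the volatile-preserving form (not the kernel's implementation):

/* Sketch of the point of the hunk above: indexing addr directly keeps the
 * volatile qualifier, so the compiler must re-read the bitmap word. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static inline int test_bit_sketch(unsigned int nr,
				  const volatile unsigned long *addr)
{
	/* casting addr to (unsigned long *) first would drop volatile */
	return ((1UL << (nr % BITS_PER_LONG)) &
		addr[nr / BITS_PER_LONG]) != 0;
}

int main(void)
{
	volatile unsigned long bitmap[2] = { 0x5UL, 0x0UL };

	printf("%d %d %d\n",
	       test_bit_sketch(0, bitmap),
	       test_bit_sketch(1, bitmap),
	       test_bit_sketch(2, bitmap));	/* prints 1 0 1 */
	return 0;
}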
......@@ -168,6 +168,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
......
......@@ -61,7 +61,7 @@ struct cstate_entry {
unsigned int ecx;
} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
......