Commit 6338a53a authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net into net

Pull in 'net' to take in the bug fixes that didn't make it into
3.8-final.

Also, deal with the semantic conflict of the change made to
net/ipv6/xfrm6_policy.c. A missing rt6->n neighbour release
was added to 'net', but in 'net-next' we no longer cache the
neighbour entries in the ipv6 routes, so that change is not
appropriate there.
Signed-off-by: David S. Miller <davem@davemloft.net>
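
For context, a sketch of the 'net'-side change that created the conflict
(illustrative only; the exact hunk may differ in detail): the xfrm6 dst
destructor gained a release of the neighbour entry cached in the route:

    static void xfrm6_dst_destroy(struct dst_entry *dst)
    {
            struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

            /* 'net' only: drop the cached neighbour reference; in
             * 'net-next' rt6->n no longer exists, so this hunk is
             * dropped when merging. */
            if (likely(xdst->u.rt6.n))
                    neigh_release(xdst->u.rt6.n);
            ...
    }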
@@ -390,6 +390,7 @@ Protocol: 2.00+
   F   Special (0xFF = undefined)
   10  Reserved
   11  Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
+  12  OVMF UEFI virtualization stack
 
  Please contact <hpa@zytor.com> if you need a bootloader ID
  value assigned.
......
@@ -7524,7 +7524,7 @@ S:	Maintained
 F:	drivers/media/tuners/tea5767.*
 
 TEAM DRIVER
-M:	Jiri Pirko <jpirko@redhat.com>
+M:	Jiri Pirko <jiri@resnulli.us>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/team/
......
@@ -7,8 +7,14 @@
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 #endif
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
 	unsigned int val;
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
......
@@ -28,6 +28,7 @@
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-	cpu = cpu_logical_map(cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
 	writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
 	__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
 	outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
......
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 static inline void highbank_set_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
 	else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 static inline void highbank_clear_core_pwr(void)
 {
-	int cpu = cpu_logical_map(smp_processor_id());
+	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 	if (scu_base_addr)
 		scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
 	else
......
@@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 {
-	emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
-	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
-	emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
-	emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+	/* r_dst = (r_src << 8) | (r_src >> 8) */
+	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
+	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+
+	/*
+	 * we need to mask out the bits set in r_dst[23:16] due to
+	 * the first shift instruction.
+	 *
+	 * note that 0x8ff is the encoded immediate 0x00ff0000.
+	 */
+	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 }
 
 #else /* ARMv6+ */
......
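
In C terms, the corrected emit_swap16() sequence computes the halfword swap
below; this sketch (not kernel code) shows why the BIC is needed: the left
shift also copies r_src[15:8] into bits 23:16, and clearing 0x00ff0000
(encoded as the ARM immediate 0x8ff) masks those stray bits back out:

    static unsigned int swap16(unsigned int r_src)
    {
            unsigned int r1 = r_src << 8;           /* bits 15:8 spill into 23:16 */
            unsigned int r_dst = r1 | (r_src >> 8); /* (r_src << 8) | (r_src >> 8) */

            r_dst &= ~0x00ff0000;                   /* BIC the spilled bits */
            return r_dst;                           /* low 16 bits hold the swap */
    }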
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp) \
 do { \
 	(_regs)->pc = (_pc); \
-	((struct switch_stack *)(_regs))[-1].a6 = 0; \
 	setframeformat(_regs); \
 	if (current->mm) \
 		(_regs)->d5 = current->mm->start_data; \
......
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
 	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
 	do_div(nsecs, 125);
 	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+	/* Program the maximum value if we have an overflow (== year 2042) */
+	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+		S390_lowcore.clock_comparator = -1ULL;
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	return 0;
 }
......
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Tilera-specific configuration"
 
 config NR_CPUS
......
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
 	int x;
 	BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
 		writel(*(u32 *)(src + x), dst + x);
 }
 
+#endif
+
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
......
@@ -18,32 +18,20 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-	(~(INT_MASK(INT_PERF_COUNT)))
-#endif
+	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
 
-#endif
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
+#endif
 
 #ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+	(__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+	(__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1) \
 	{ \
-	 movei  tmp0, -1; \
+	 movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
 	}; \
 	{ \
......
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 /* Note: must handle breaking interrupts into high and low words manually. */
 #define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
 #ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 /** Where a given interrupt executes */
@@ -92,216 +94,216 @@
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_DMATLB_MISS) | \
+	(1ULL << INT_DMATLB_MISS) | \
-	INT_MASK(INT_DMATLB_ACCESS) | \
+	(1ULL << INT_DMATLB_ACCESS) | \
-	INT_MASK(INT_SNITLB_MISS) | \
+	(1ULL << INT_SNITLB_MISS) | \
-	INT_MASK(INT_SN_NOTIFY) | \
+	(1ULL << INT_SN_NOTIFY) | \
-	INT_MASK(INT_SN_FIREWALL) | \
+	(1ULL << INT_SN_FIREWALL) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_DMA_NOTIFY) | \
+	(1ULL << INT_DMA_NOTIFY) | \
-	INT_MASK(INT_IDN_CA) | \
+	(1ULL << INT_IDN_CA) | \
-	INT_MASK(INT_UDN_CA) | \
+	(1ULL << INT_UDN_CA) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DMA_ASID) | \
+	(1ULL << INT_DMA_ASID) | \
-	INT_MASK(INT_SNI_ASID) | \
+	(1ULL << INT_SNI_ASID) | \
-	INT_MASK(INT_DMA_CPL) | \
+	(1ULL << INT_DMA_CPL) | \
-	INT_MASK(INT_SN_CPL) | \
+	(1ULL << INT_SN_CPL) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
 	0)
 
 #define NONQUEUED_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_SN_ACCESS) | \
+	(1ULL << INT_SN_ACCESS) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_IDN_REFILL) | \
+	(1ULL << INT_IDN_REFILL) | \
-	INT_MASK(INT_UDN_REFILL) | \
+	(1ULL << INT_UDN_REFILL) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_SN_STATIC_ACCESS) | \
+	(1ULL << INT_SN_STATIC_ACCESS) | \
 	0)
 
 #define CRITICAL_MASKED_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_DMATLB_MISS) | \
+	(1ULL << INT_DMATLB_MISS) | \
-	INT_MASK(INT_DMATLB_ACCESS) | \
+	(1ULL << INT_DMATLB_ACCESS) | \
-	INT_MASK(INT_SNITLB_MISS) | \
+	(1ULL << INT_SNITLB_MISS) | \
-	INT_MASK(INT_SN_NOTIFY) | \
+	(1ULL << INT_SN_NOTIFY) | \
-	INT_MASK(INT_SN_FIREWALL) | \
+	(1ULL << INT_SN_FIREWALL) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_DMA_NOTIFY) | \
+	(1ULL << INT_DMA_NOTIFY) | \
-	INT_MASK(INT_IDN_CA) | \
+	(1ULL << INT_IDN_CA) | \
-	INT_MASK(INT_UDN_CA) | \
+	(1ULL << INT_UDN_CA) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
 	0)
 
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_SN_ACCESS) | \
+	(1ULL << INT_SN_ACCESS) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_IDN_REFILL) | \
+	(1ULL << INT_IDN_REFILL) | \
-	INT_MASK(INT_UDN_REFILL) | \
+	(1ULL << INT_UDN_REFILL) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DMA_ASID) | \
+	(1ULL << INT_DMA_ASID) | \
-	INT_MASK(INT_SNI_ASID) | \
+	(1ULL << INT_SNI_ASID) | \
-	INT_MASK(INT_DMA_CPL) | \
+	(1ULL << INT_DMA_CPL) | \
-	INT_MASK(INT_SN_CPL) | \
+	(1ULL << INT_SN_CPL) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
-	INT_MASK(INT_SN_STATIC_ACCESS) | \
+	(1ULL << INT_SN_STATIC_ACCESS) | \
 	0)
 
 #define MASKABLE_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_IDN_REFILL) | \
+	(1ULL << INT_IDN_REFILL) | \
-	INT_MASK(INT_UDN_REFILL) | \
+	(1ULL << INT_UDN_REFILL) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_DMATLB_MISS) | \
+	(1ULL << INT_DMATLB_MISS) | \
-	INT_MASK(INT_DMATLB_ACCESS) | \
+	(1ULL << INT_DMATLB_ACCESS) | \
-	INT_MASK(INT_SNITLB_MISS) | \
+	(1ULL << INT_SNITLB_MISS) | \
-	INT_MASK(INT_SN_NOTIFY) | \
+	(1ULL << INT_SN_NOTIFY) | \
-	INT_MASK(INT_SN_FIREWALL) | \
+	(1ULL << INT_SN_FIREWALL) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_DMA_NOTIFY) | \
+	(1ULL << INT_DMA_NOTIFY) | \
-	INT_MASK(INT_IDN_CA) | \
+	(1ULL << INT_IDN_CA) | \
-	INT_MASK(INT_UDN_CA) | \
+	(1ULL << INT_UDN_CA) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
 	0)
 
 #define UNMASKABLE_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_SN_ACCESS) | \
+	(1ULL << INT_SN_ACCESS) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DMA_ASID) | \
+	(1ULL << INT_DMA_ASID) | \
-	INT_MASK(INT_SNI_ASID) | \
+	(1ULL << INT_SNI_ASID) | \
-	INT_MASK(INT_DMA_CPL) | \
+	(1ULL << INT_DMA_CPL) | \
-	INT_MASK(INT_SN_CPL) | \
+	(1ULL << INT_SN_CPL) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
-	INT_MASK(INT_SN_STATIC_ACCESS) | \
+	(1ULL << INT_SN_STATIC_ACCESS) | \
 	0)
 
 #define SYNC_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_SN_ACCESS) | \
+	(1ULL << INT_SN_ACCESS) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_IDN_REFILL) | \
+	(1ULL << INT_IDN_REFILL) | \
-	INT_MASK(INT_UDN_REFILL) | \
+	(1ULL << INT_UDN_REFILL) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_SN_STATIC_ACCESS) | \
+	(1ULL << INT_SN_STATIC_ACCESS) | \
 	0)
 
 #define NON_SYNC_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_DMATLB_MISS) | \
+	(1ULL << INT_DMATLB_MISS) | \
-	INT_MASK(INT_DMATLB_ACCESS) | \
+	(1ULL << INT_DMATLB_ACCESS) | \
-	INT_MASK(INT_SNITLB_MISS) | \
+	(1ULL << INT_SNITLB_MISS) | \
-	INT_MASK(INT_SN_NOTIFY) | \
+	(1ULL << INT_SN_NOTIFY) | \
-	INT_MASK(INT_SN_FIREWALL) | \
+	(1ULL << INT_SN_FIREWALL) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_DMA_NOTIFY) | \
+	(1ULL << INT_DMA_NOTIFY) | \
-	INT_MASK(INT_IDN_CA) | \
+	(1ULL << INT_IDN_CA) | \
-	INT_MASK(INT_UDN_CA) | \
+	(1ULL << INT_UDN_CA) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DMA_ASID) | \
+	(1ULL << INT_DMA_ASID) | \
-	INT_MASK(INT_SNI_ASID) | \
+	(1ULL << INT_SNI_ASID) | \
-	INT_MASK(INT_DMA_CPL) | \
+	(1ULL << INT_DMA_CPL) | \
-	INT_MASK(INT_SN_CPL) | \
+	(1ULL << INT_SN_CPL) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
 	0)
 
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 #ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
 #else
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 /** Where a given interrupt executes */
@@ -85,192 +87,192 @@
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_AUX_TILE_TIMER) | \
+	(1ULL << INT_AUX_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_IPI_3) | \
+	(1ULL << INT_IPI_3) | \
-	INT_MASK(INT_IPI_2) | \
+	(1ULL << INT_IPI_2) | \
-	INT_MASK(INT_IPI_1) | \
+	(1ULL << INT_IPI_1) | \
-	INT_MASK(INT_IPI_0) | \
+	(1ULL << INT_IPI_0) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
 	0)
 
 #define NONQUEUED_INTERRUPTS ( \
-	INT_MASK(INT_SINGLE_STEP_3) | \
+	(1ULL << INT_SINGLE_STEP_3) | \
-	INT_MASK(INT_SINGLE_STEP_2) | \
+	(1ULL << INT_SINGLE_STEP_2) | \
-	INT_MASK(INT_SINGLE_STEP_1) | \
+	(1ULL << INT_SINGLE_STEP_1) | \
-	INT_MASK(INT_SINGLE_STEP_0) | \
+	(1ULL << INT_SINGLE_STEP_0) | \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_ILL_TRANS) | \
+	(1ULL << INT_ILL_TRANS) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
 	0)
 
 #define CRITICAL_MASKED_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_SINGLE_STEP_3) | \
+	(1ULL << INT_SINGLE_STEP_3) | \
-	INT_MASK(INT_SINGLE_STEP_2) | \
+	(1ULL << INT_SINGLE_STEP_2) | \
-	INT_MASK(INT_SINGLE_STEP_1) | \
+	(1ULL << INT_SINGLE_STEP_1) | \
-	INT_MASK(INT_SINGLE_STEP_0) | \
+	(1ULL << INT_SINGLE_STEP_0) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_AUX_TILE_TIMER) | \
+	(1ULL << INT_AUX_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_IPI_3) | \
+	(1ULL << INT_IPI_3) | \
-	INT_MASK(INT_IPI_2) | \
+	(1ULL << INT_IPI_2) | \
-	INT_MASK(INT_IPI_1) | \
+	(1ULL << INT_IPI_1) | \
-	INT_MASK(INT_IPI_0) | \
+	(1ULL << INT_IPI_0) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
 	0)
 
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_ILL_TRANS) | \
+	(1ULL << INT_ILL_TRANS) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
 	0)
 
 #define MASKABLE_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_SINGLE_STEP_3) | \
+	(1ULL << INT_SINGLE_STEP_3) | \
-	INT_MASK(INT_SINGLE_STEP_2) | \
+	(1ULL << INT_SINGLE_STEP_2) | \
-	INT_MASK(INT_SINGLE_STEP_1) | \
+	(1ULL << INT_SINGLE_STEP_1) | \
-	INT_MASK(INT_SINGLE_STEP_0) | \
+	(1ULL << INT_SINGLE_STEP_0) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_AUX_TILE_TIMER) | \
+	(1ULL << INT_AUX_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_IPI_3) | \
+	(1ULL << INT_IPI_3) | \
-	INT_MASK(INT_IPI_2) | \
+	(1ULL << INT_IPI_2) | \
-	INT_MASK(INT_IPI_1) | \
+	(1ULL << INT_IPI_1) | \
-	INT_MASK(INT_IPI_0) | \
+	(1ULL << INT_IPI_0) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
 	0)
 
 #define UNMASKABLE_INTERRUPTS ( \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_ILL_TRANS) | \
+	(1ULL << INT_ILL_TRANS) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
 	0)
 
 #define SYNC_INTERRUPTS ( \
-	INT_MASK(INT_SINGLE_STEP_3) | \
+	(1ULL << INT_SINGLE_STEP_3) | \
-	INT_MASK(INT_SINGLE_STEP_2) | \
+	(1ULL << INT_SINGLE_STEP_2) | \
-	INT_MASK(INT_SINGLE_STEP_1) | \
+	(1ULL << INT_SINGLE_STEP_1) | \
-	INT_MASK(INT_SINGLE_STEP_0) | \
+	(1ULL << INT_SINGLE_STEP_0) | \
-	INT_MASK(INT_IDN_COMPLETE) | \
+	(1ULL << INT_IDN_COMPLETE) | \
-	INT_MASK(INT_UDN_COMPLETE) | \
+	(1ULL << INT_UDN_COMPLETE) | \
-	INT_MASK(INT_ITLB_MISS) | \
+	(1ULL << INT_ITLB_MISS) | \
-	INT_MASK(INT_ILL) | \
+	(1ULL << INT_ILL) | \
-	INT_MASK(INT_GPV) | \
+	(1ULL << INT_GPV) | \
-	INT_MASK(INT_IDN_ACCESS) | \
+	(1ULL << INT_IDN_ACCESS) | \
-	INT_MASK(INT_UDN_ACCESS) | \
+	(1ULL << INT_UDN_ACCESS) | \
-	INT_MASK(INT_SWINT_3) | \
+	(1ULL << INT_SWINT_3) | \
-	INT_MASK(INT_SWINT_2) | \
+	(1ULL << INT_SWINT_2) | \
-	INT_MASK(INT_SWINT_1) | \
+	(1ULL << INT_SWINT_1) | \
-	INT_MASK(INT_SWINT_0) | \
+	(1ULL << INT_SWINT_0) | \
-	INT_MASK(INT_ILL_TRANS) | \
+	(1ULL << INT_ILL_TRANS) | \
-	INT_MASK(INT_UNALIGN_DATA) | \
+	(1ULL << INT_UNALIGN_DATA) | \
-	INT_MASK(INT_DTLB_MISS) | \
+	(1ULL << INT_DTLB_MISS) | \
-	INT_MASK(INT_DTLB_ACCESS) | \
+	(1ULL << INT_DTLB_ACCESS) | \
 	0)
 
 #define NON_SYNC_INTERRUPTS ( \
-	INT_MASK(INT_MEM_ERROR) | \
+	(1ULL << INT_MEM_ERROR) | \
-	INT_MASK(INT_IDN_FIREWALL) | \
+	(1ULL << INT_IDN_FIREWALL) | \
-	INT_MASK(INT_UDN_FIREWALL) | \
+	(1ULL << INT_UDN_FIREWALL) | \
-	INT_MASK(INT_TILE_TIMER) | \
+	(1ULL << INT_TILE_TIMER) | \
-	INT_MASK(INT_AUX_TILE_TIMER) | \
+	(1ULL << INT_AUX_TILE_TIMER) | \
-	INT_MASK(INT_IDN_TIMER) | \
+	(1ULL << INT_IDN_TIMER) | \
-	INT_MASK(INT_UDN_TIMER) | \
+	(1ULL << INT_UDN_TIMER) | \
-	INT_MASK(INT_IDN_AVAIL) | \
+	(1ULL << INT_IDN_AVAIL) | \
-	INT_MASK(INT_UDN_AVAIL) | \
+	(1ULL << INT_UDN_AVAIL) | \
-	INT_MASK(INT_IPI_3) | \
+	(1ULL << INT_IPI_3) | \
-	INT_MASK(INT_IPI_2) | \
+	(1ULL << INT_IPI_2) | \
-	INT_MASK(INT_IPI_1) | \
+	(1ULL << INT_IPI_1) | \
-	INT_MASK(INT_IPI_0) | \
+	(1ULL << INT_IPI_0) | \
-	INT_MASK(INT_PERF_COUNT) | \
+	(1ULL << INT_PERF_COUNT) | \
-	INT_MASK(INT_AUX_PERF_COUNT) | \
+	(1ULL << INT_AUX_PERF_COUNT) | \
-	INT_MASK(INT_INTCTRL_3) | \
+	(1ULL << INT_INTCTRL_3) | \
-	INT_MASK(INT_INTCTRL_2) | \
+	(1ULL << INT_INTCTRL_2) | \
-	INT_MASK(INT_INTCTRL_1) | \
+	(1ULL << INT_INTCTRL_1) | \
-	INT_MASK(INT_INTCTRL_0) | \
+	(1ULL << INT_INTCTRL_0) | \
-	INT_MASK(INT_BOOT_ACCESS) | \
+	(1ULL << INT_BOOT_ACCESS) | \
-	INT_MASK(INT_WORLD_ACCESS) | \
+	(1ULL << INT_WORLD_ACCESS) | \
-	INT_MASK(INT_I_ASID) | \
+	(1ULL << INT_I_ASID) | \
-	INT_MASK(INT_D_ASID) | \
+	(1ULL << INT_D_ASID) | \
-	INT_MASK(INT_DOUBLE_FAULT) | \
+	(1ULL << INT_DOUBLE_FAULT) | \
 	0)
 
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
@@ -1035,7 +1035,9 @@ handle_syscall:
 	/* Ensure that the syscall number is within the legal range. */
 	{
 	 moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
 	 blbs r30, .Lcompat_syscall
+#endif
 	}
 	{
 	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
 	 j .Lresume_userspace /* jump into middle of interrupt_return */
 	}
 
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
 	/*
	 * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
 	{ move r15, r4; addxi r4, r4, 0 }
 	{ move r16, r5; addxi r5, r5, 0 }
 	j .Lload_syscall_pointer
+#endif
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
......
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+	struct pt_regs *childregs = task_pt_regs(p);
 	unsigned long ksp;
 	unsigned long *callee_regs;
......
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
......
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 			p->pc, p->sp, p->ex1);
 		p = NULL;
 	}
-	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+	if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
 		return p;
 	return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
 	save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 #endif
......
@@ -12,6 +12,7 @@
  * more details.
  */
 
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
 	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
 	} while (*bp != '\0' && *bp != '\n');
 	return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
......
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
 		__set_pte(ptep, pte_set_home(pteval, home));
 	}
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
 				   unsigned int order, int home)
......
@@ -3,6 +3,90 @@
 
 #include <uapi/asm/mce.h>
 
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
+#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
+#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
+#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT 16
+#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
+#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
+#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
+#define MCI_MISC_ADDR_PHYS 2 /* physical address */
+#define MCI_MISC_ADDR_MEM 3 /* memory address */
+#define MCI_MISC_ADDR_GENERIC 7 /* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
+
+#define MCJ_CTX_MASK 3
+#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM 0 /* inject context: random */
+#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
+#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
+#define MCJ_EXCEPTION 0x8 /* raise as exception */
+#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK 128
+#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE "MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log. Also
+ * carries a signature to make it easier to find from external
+ * debugging tools. Each entry is only valid when its finished flag
+ * is set.
+ */
+struct mce_log {
+	char signature[12]; /* "MACHINECHECK" */
+	unsigned len; /* = MCE_LOG_LEN */
+	unsigned next;
+	unsigned flags;
+	unsigned recordlen; /* length of struct mce */
+	struct mce entry[MCE_LOG_LEN];
+};
+
 struct mca_config {
 	bool dont_log_ce;
......
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pud_pfn(pud_t pud)
+{
+	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
@@ -4,66 +4,6 @@
 #include <linux/types.h>
 #include <asm/ioctls.h>
 
-/*
- * Machine Check support for x86
- */
-
-/* MCG_CAP register defines */
-#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
-#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
-#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
-#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
-#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
-#define MCG_EXT_CNT_SHIFT 16
-#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
-
-/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
-
-/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
-
-/* MCi_MISC register defines */
-#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
-#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
-#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
-#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
-#define MCI_MISC_ADDR_PHYS 2 /* physical address */
-#define MCI_MISC_ADDR_MEM 3 /* memory address */
-#define MCI_MISC_ADDR_GENERIC 7 /* generic */
-
-/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN (1ULL << 30)
-#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
-
-#define MCJ_CTX_MASK 3
-#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
-#define MCJ_CTX_RANDOM 0 /* inject context: random */
-#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
-#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
-#define MCJ_EXCEPTION 0x8 /* raise as exception */
-#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
-
 /* Fields are zero when not available */
 struct mce {
 	__u64 status;
@@ -87,35 +27,8 @@ struct mce {
 	__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
 };
 
-/*
- * This structure contains all data related to the MCE log. Also
- * carries a signature to make it easier to find from external
- * debugging tools. Each entry is only valid when its finished flag
- * is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
-	char signature[12]; /* "MACHINECHECK" */
-	unsigned len; /* = MCE_LOG_LEN */
-	unsigned next;
-	unsigned flags;
-	unsigned recordlen; /* length of struct mce */
-	struct mce entry[MCE_LOG_LEN];
-};
-
-#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE "MACHINECHECK"
-
 #define MCE_GET_RECORD_LEN _IOR('M', 1, int)
 #define MCE_GET_LOG_LEN _IOR('M', 2, int)
 #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
 
-/* Software defined banks */
-#define MCE_EXTENDED_BANK 128
-#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
-
 #endif /* _UAPI_ASM_X86_MCE_H */
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
 }
 early_param("x2apic_phys", set_x2apic_phys_mode);
 
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static bool x2apic_fadt_phys(void)
 {
-	if (x2apic_phys)
-		return x2apic_enabled();
-	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
-		 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
-		 x2apic_enabled()) {
+	if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
 		printk(KERN_DEBUG "System requires x2apic physical mode\n");
-		return 1;
+		return true;
 	}
-	else
-		return 0;
+	return false;
+}
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
 }
 
 static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
 
 static int x2apic_phys_probe(void)
 {
-	if (x2apic_mode && x2apic_phys)
+	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
 		return 1;
 
 	return apic == &apic_x2apic_phys;
......
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		return;
 	}
 #endif
 
+	/* Kernel addresses are always protection faults: */
+	if (address >= TASK_SIZE)
+		error_code |= PF_PROT;
+
-	if (unlikely(show_unhandled_signals))
+	if (likely(show_unhandled_signals))
 		show_signal_msg(regs, error_code, address, tsk);
 
-	/* Kernel addresses are always protection faults: */
 	tsk->thread.cr2 = address;
-	tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+	tsk->thread.error_code = error_code;
 	tsk->thread.trap_nr = X86_TRAP_PF;
 
 	force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
......
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
 	if (pud_none(*pud))
 		return 0;
 
+	if (pud_large(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return 0;
......
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
 	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
 
 	/* wait for 'uc halted' to be signalled before continuing */
-	if (falcon->secret) {
-		nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+	if (falcon->secret && falcon->version < 4) {
+		if (!falcon->version)
+			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+		else
+			nv_wait(falcon, 0x180, 0x80000000, 0);
 		nv_wo32(falcon, 0x004, 0x00000010);
 	}
......
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
-	mutex_init(&subdev->mutex);
+	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
 	subdev->name = subname;
 
 	if (parent) {
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);
 
 extern struct nouveau_ofuncs nouveau_object_ofuncs;
 
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
 	u32 handle;
-	struct nouveau_ofuncs *ofuncs;
-	struct nouveau_omthds *omthds;
+	struct nouveau_ofuncs * const ofuncs;
+	struct nouveau_omthds * const omthds;
+	struct lock_class_key lock_class_key;
 };
 
 #define nv_oclass(o) nv_object(o)->oclass
......
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
 			return ret;
 	}
 
-	if (!nouveau_mm_initialised(&pfb->tags) && tags) {
-		ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+	if (!nouveau_mm_initialised(&pfb->tags)) {
+		ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
 		if (ret)
 			return ret;
 	}
......
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
 	struct nouveau_bios *bios = nouveau_bios(device);
 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 size;
+	u32 size, tags = 0;
 	int ret;
 
 	pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
 			return ret;
 
 		pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+		tags = nv_rd32(pfb, 0x100320);
 		break;
 	}
 
-	return nv_rd32(pfb, 0x100320);
+	return tags;
 }
 
 static int
......
@@ -28,6 +28,7 @@
 */
 
 #include <core/engine.h>
+#include <linux/swiotlb.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	return 0;
 }
 
+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
 	if (ret)
 		return ret;
+	lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
 	dev->dev_private = drm;
 	drm->dev = dev;
......
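The nouveau hunks above all serve one lockdep requirement: a struct lock_class_key must live in static storage, so the key is embedded in the statically-defined oclass and the DRM client mutex gets its own file-scope key. A minimal sketch of the same pattern, assuming hypothetical demo_* names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* lockdep needs class keys in static storage (".data"), never kmalloc()ed */
static struct lock_class_key demo_lock_key;

struct demo_obj {
	struct mutex lock;
};

static void demo_obj_init(struct demo_obj *obj, const char *name)
{
	/* supply an explicit name and key so every demo_obj shares one
	 * lock class instead of lockdep creating one per init call site */
	__mutex_init(&obj->lock, name, &demo_lock_key);
}

lockdep_set_class(), as used in the nouveau_drm_load() hunk, achieves the same thing after a normal mutex_init().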
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 return -EINVAL;
 }
 if (tiled) {
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 p->idx += count + 7;
 } else {
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
-dst2_offset = ib[idx+2];
+dst2_offset = radeon_get_ib_value(p, idx+2);
 dst2_offset <<= 8;
-src_offset = ib[idx+8];
-src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+8);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
-dst2_offset = ib[idx+2];
+dst2_offset = radeon_get_ib_value(p, idx+2);
 dst2_offset <<= 8;
-src_offset = ib[idx+8];
-src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+8);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 /* detile bit */
 if (idx_value & (1 << 31)) {
 /* tiled src, linear dst */
-src_offset = ib[idx+1];
+src_offset = radeon_get_ib_value(p, idx+1);
 src_offset <<= 8;
 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-dst_offset = ib[idx+7];
-dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+7);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 } else {
 /* linear src, tiled dst */
-src_offset = ib[idx+7];
-src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+7);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 }
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
-dst2_offset = ib[idx+2];
+dst2_offset = radeon_get_ib_value(p, idx+2);
 dst2_offset <<= 8;
-src_offset = ib[idx+8];
-src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+8);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 /* detile bit */
 if (idx_value & (1 << 31)) {
 /* tiled src, linear dst */
-src_offset = ib[idx+1];
+src_offset = radeon_get_ib_value(p, idx+1);
 src_offset <<= 8;
 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-dst_offset = ib[idx+7];
-dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+7);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 } else {
 /* linear src, tiled dst */
-src_offset = ib[idx+7];
-src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+7);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 }
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 switch (misc) {
 case 0:
 /* L2L, byte */
-src_offset = ib[idx+2];
-src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+2);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-dst2_offset = ib[idx+2];
-dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-src_offset = ib[idx+3];
-src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+dst2_offset = radeon_get_ib_value(p, idx+2);
+dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+3);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 }
 } else {
 /* L2L, dw */
-src_offset = ib[idx+2];
-src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+2);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 dst_offset, radeon_bo_size(dst_reloc->robj));
...
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 return -EINVAL;
 }
 if (tiled) {
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 p->idx += count + 5;
 } else {
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 /* detile bit */
 if (idx_value & (1 << 31)) {
 /* tiled src, linear dst */
-src_offset = ib[idx+1];
+src_offset = radeon_get_ib_value(p, idx+1);
 src_offset <<= 8;
 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-dst_offset = ib[idx+5];
-dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+5);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 } else {
 /* linear src, tiled dst */
-src_offset = ib[idx+5];
-src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+5);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-dst_offset = ib[idx+1];
+dst_offset = radeon_get_ib_value(p, idx+1);
 dst_offset <<= 8;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 }
 p->idx += 7;
 } else {
 if (p->family >= CHIP_RV770) {
-src_offset = ib[idx+2];
-src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+src_offset = radeon_get_ib_value(p, idx+2);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 p->idx += 5;
 } else {
-src_offset = ib[idx+2];
-src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+src_offset = radeon_get_ib_value(p, idx+2);
+src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 DRM_ERROR("bad DMA_PACKET_WRITE\n");
 return -EINVAL;
 }
-dst_offset = ib[idx+1];
-dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+dst_offset = radeon_get_ib_value(p, idx+1);
+dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
...
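Every hunk in the two DMA command-stream parsers above makes the same substitution: operand reads go through radeon_get_ib_value(p, idx) instead of dereferencing ib[idx] directly, while the relocation patches still write ib[idx] in place. The underlying pattern is a split between the copy of the stream you trust for reading and the buffer you patch for the hardware; a rough sketch under that assumption, with hypothetical names rather than radeon's internal API:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical parser state: operands come from a validated CPU-side
 * snapshot, relocations are patched into the buffer the GPU fetches. */
struct cs_parser {
	const uint32_t *snapshot; /* CPU copy of the command stream */
	uint32_t *gpu_ib;         /* buffer handed to the hardware */
	size_t len;               /* length of both, in dwords */
};

static uint32_t cs_get_value(const struct cs_parser *p, size_t idx)
{
	/* one central accessor: a single place for bounds checking and
	 * for deciding which copy of the stream is authoritative */
	return idx < p->len ? p->snapshot[idx] : 0;
}

static int cs_patch_dst(struct cs_parser *p, size_t idx, uint64_t gpu_offset)
{
	uint64_t dst_offset;

	if (idx + 2 >= p->len)
		return -EINVAL;
	/* read the 40-bit destination through the accessor ... */
	dst_offset = cs_get_value(p, idx + 1);
	dst_offset |= ((uint64_t)(cs_get_value(p, idx + 2) & 0xff)) << 32;
	/* ... validate dst_offset against the BO size here, then patch
	 * the relocation into the GPU-visible copy, as the driver does */
	p->gpu_ib[idx + 1] += (uint32_t)(gpu_offset & 0xfffffffc);
	p->gpu_ib[idx + 2] += (uint32_t)(gpu_offset >> 32) & 0xff;
	(void)dst_offset;
	return 0;
}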
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
...
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x) do { x = get_cycles(); } while (0)
 #define DELTA(x,y) ((y)-(x))
 #define TIME_NAME "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
 #define GET_TIME(x) do { x = get_cycles(); } while (0)
 #define DELTA(x, y) ((x) - (y))
 #define TIME_NAME "TSC"
...
@@ -21,7 +21,7 @@
 #include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
@@ -1649,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
 u16 num_alloc = 0;
 u16 rfd_next_to_use, next_next;
 struct atl1c_rx_free_desc *rfd_desc;
+dma_addr_t mapping;
 next_next = rfd_next_to_use = rfd_ring->next_to_use;
 if (++next_next == rfd_ring->count)
@@ -1675,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 buffer_info->skb = skb;
 buffer_info->length = adapter->rx_buffer_len;
-buffer_info->dma = pci_map_single(pdev, vir_addr,
+mapping = pci_map_single(pdev, vir_addr,
 buffer_info->length,
 PCI_DMA_FROMDEVICE);
+if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+dev_kfree_skb(skb);
+buffer_info->skb = NULL;
+buffer_info->length = 0;
+ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+break;
+}
+buffer_info->dma = mapping;
 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
 ATL1C_PCIMAP_FROMDEVICE);
 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2012,7 +2022,29 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
 return 0;
 }
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+struct atl1c_tpd_desc *first_tpd,
+enum atl1c_trans_queue type)
+{
+struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+struct atl1c_buffer *buffer_info;
+struct atl1c_tpd_desc *tpd;
+u16 first_index, index;
+first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+index = first_index;
+while (index != tpd_ring->next_to_use) {
+tpd = ATL1C_TPD_DESC(tpd_ring, index);
+buffer_info = &tpd_ring->buffer_info[index];
+atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+if (++index == tpd_ring->count)
+index = 0;
+}
+tpd_ring->next_to_use = first_index;
+}
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
 struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
 enum atl1c_trans_queue type)
 {
@@ -2037,7 +2069,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 buffer_info->length = map_len;
 buffer_info->dma = pci_map_single(adapter->pdev,
 skb->data, hdr_len, PCI_DMA_TODEVICE);
-ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+if (unlikely(pci_dma_mapping_error(adapter->pdev,
+buffer_info->dma)))
+goto err_dma;
 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
 ATL1C_PCIMAP_TODEVICE);
 mapped_len += map_len;
@@ -2059,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 buffer_info->dma =
 pci_map_single(adapter->pdev, skb->data + mapped_len,
 buffer_info->length, PCI_DMA_TODEVICE);
+if (unlikely(pci_dma_mapping_error(adapter->pdev,
+buffer_info->dma)))
+goto err_dma;
 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
 ATL1C_PCIMAP_TODEVICE);
@@ -2080,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 frag, 0,
 buffer_info->length,
 DMA_TO_DEVICE);
+if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+goto err_dma;
 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
 ATL1C_PCIMAP_TODEVICE);
@@ -2092,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
 /* The last buffer info contain the skb address,
 so it will be free after unmap */
 buffer_info->skb = skb;
+return 0;
+err_dma:
+buffer_info->dma = 0;
+buffer_info->length = 0;
+return -1;
 }
 static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2154,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 if (skb_network_offset(skb) != ETH_HLEN)
 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
-atl1c_tx_map(adapter, skb, tpd, type);
-atl1c_tx_queue(adapter, skb, tpd, type);
-spin_unlock_irqrestore(&adapter->tx_lock, flags);
+if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+netif_info(adapter, tx_done, adapter->netdev,
+"tx-skb droppted due to dma error\n");
+/* roll back tpd/buffer */
+atl1c_tx_rollback(adapter, tpd, type);
+spin_unlock_irqrestore(&adapter->tx_lock, flags);
+dev_kfree_skb(skb);
+} else {
+atl1c_tx_queue(adapter, skb, tpd, type);
+spin_unlock_irqrestore(&adapter->tx_lock, flags);
+}
 return NETDEV_TX_OK;
 }
...
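The atl1c changes follow the standard descriptor-ring discipline: check every DMA mapping with the mapping-error helper before handing the address to hardware, and on a mid-frame failure unwind the descriptors already mapped (atl1c_tx_rollback) before dropping the skb. A minimal sketch of that map-then-rollback pattern using the generic DMA API; the demo_ring type and its fixed size are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* hypothetical mini-ring: one entry per mapped piece of a frame */
struct demo_ring {
	struct device *dev;
	dma_addr_t addr[16];
	unsigned int len[16];
	unsigned int used;
};

static void demo_rollback(struct demo_ring *r, unsigned int first)
{
	/* unmap everything mapped since 'first', mirroring atl1c_tx_rollback() */
	while (r->used > first) {
		r->used--;
		dma_unmap_single(r->dev, r->addr[r->used],
				 r->len[r->used], DMA_TO_DEVICE);
	}
}

static int demo_map_head(struct demo_ring *r, struct sk_buff *skb)
{
	unsigned int first = r->used;
	dma_addr_t dma;

	dma = dma_map_single(r->dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	/* mappings can fail (e.g. swiotlb exhaustion): check before use */
	if (dma_mapping_error(r->dev, dma)) {
		demo_rollback(r, first);
		return -ENOMEM;
	}
	r->addr[r->used] = dma;
	r->len[r->used] = skb_headlen(skb);
	r->used++;
	/* fragments would be mapped here with skb_frag_dma_map(),
	 * with the same error check and rollback on failure */
	return 0;
}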
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
 * get notified when new packets arrive.
 */
 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+/* Packets received while interrupts were disabled */
+status = macb_readl(bp, RSR);
+if (unlikely(status))
+napi_reschedule(napi);
 }
 /* TODO: Handle errors */
...
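This macb hunk closes the classic NAPI completion race: a packet that arrives after the final poll but before RX interrupts are re-enabled would otherwise wait for an interrupt that never comes. The general shape of the fix, sketched with hypothetical demo_* helpers standing in for driver register accesses:

#include <linux/netdevice.h>

struct demo_priv {
	struct napi_struct napi;
};

/* hypothetical hardware helpers */
static int demo_rx(struct demo_priv *priv, int budget);
static void demo_enable_rx_irq(struct demo_priv *priv);
static bool demo_rx_pending(struct demo_priv *priv);

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int work_done = demo_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		demo_enable_rx_irq(priv);
		/* re-read hardware state: anything received in the window
		 * above must re-arm polling instead of waiting for an IRQ */
		if (demo_rx_pending(priv))
			napi_reschedule(napi);
	}
	return work_done;
}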
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
 } else if (!strncmp(opt, "pause:", 6)) {
 if (kstrtoint(opt + 6, 0, &pause))
 goto err;
-} else if (!strncmp(opt, "eee_timer:", 6)) {
+} else if (!strncmp(opt, "eee_timer:", 10)) {
 if (kstrtoint(opt + 10, 0, &eee_timer))
 goto err;
 }
...
@@ -576,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 if ((intf->num_altsetting == 2) &&
 !usb_set_interface(dev->udev,
 intf->cur_altsetting->desc.bInterfaceNumber,
-CDC_NCM_COMM_ALTSETTING_MBIM) &&
-cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+CDC_NCM_COMM_ALTSETTING_MBIM)) {
+if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
 return -ENODEV;
+else
+usb_set_interface(dev->udev,
+intf->cur_altsetting->desc.bInterfaceNumber,
+CDC_NCM_COMM_ALTSETTING_NCM);
+}
 #endif
 /* NCM data altsetting is always 1 */
...
@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
 disable_irq(vif->irq);
+del_timer_sync(&vif->credit_timeout);
 xen_netbk_deschedule_xenvif(vif);
 xen_netbk_remove_xenvif(vif);
 }
@@ -365,8 +366,6 @@ void xenvif_disconnect(struct xenvif *vif)
 atomic_dec(&vif->refcnt);
 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-del_timer_sync(&vif->credit_timeout);
 if (vif->irq)
 unbind_from_irqhandler(vif->irq, vif);
...
@@ -911,13 +911,13 @@ static int netbk_count_requests(struct xenvif *vif,
 if (frags >= work_to_do) {
 netdev_err(vif->dev, "Need more frags\n");
 netbk_fatal_tx_err(vif);
-return -frags;
+return -ENODATA;
 }
 if (unlikely(frags >= MAX_SKB_FRAGS)) {
 netdev_err(vif->dev, "Too many frags\n");
 netbk_fatal_tx_err(vif);
-return -frags;
+return -E2BIG;
 }
 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
@@ -925,7 +925,7 @@ static int netbk_count_requests(struct xenvif *vif,
 if (txp->size > first->size) {
 netdev_err(vif->dev, "Frag is bigger than frame.\n");
 netbk_fatal_tx_err(vif);
-return -frags;
+return -EIO;
 }
 first->size -= txp->size;
@@ -935,7 +935,7 @@ static int netbk_count_requests(struct xenvif *vif,
 netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 txp->offset, txp->size);
 netbk_fatal_tx_err(vif);
-return -frags;
+return -EINVAL;
 }
 } while ((txp++)->flags & XEN_NETTXF_more_data);
 return frags;
...
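The netbk_count_requests hunks replace return -frags with real errno values. Negating a running counter is a broken error convention: frags starts at 0, so the first failure path can return -0 == 0 and look like success to the caller, and any other count aliases an arbitrary errno. A tiny self-contained demonstration of the failure mode (hypothetical names):

#include <errno.h>
#include <stdio.h>

/* broken: the error path negates a counter that may still be zero */
static int count_items_broken(int fail_immediately)
{
	int n = 0;

	if (fail_immediately)
		return -n;	/* -0 == 0: the caller sees "success" */
	return n;
}

/* fixed: report a specific errno; the count is only a success value */
static int count_items_fixed(int fail_immediately)
{
	int n = 0;

	if (fail_immediately)
		return -ENODATA;
	return n;
}

int main(void)
{
	printf("broken: %d, fixed: %d\n",
	       count_items_broken(1), count_items_fixed(1));
	return 0;
}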
@@ -350,7 +350,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 /* Enable the clockwatch on ST Variants */
 if (vendor->clockwatch)
 data |= RTC_CR_CWEN;
-writel(data | RTC_CR_EN, ldata->base + RTC_CR);
+else
+data |= RTC_CR_EN;
+writel(data, ldata->base + RTC_CR);
 /*
 * On ST PL031 variants, the RTC reset value does not provide correct
...
@@ -1041,7 +1041,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
-inline void sk_refcnt_debug_release(const struct sock *sk)
+static inline void sk_refcnt_debug_release(const struct sock *sk)
 {
 if (atomic_read(&sk->sk_refcnt) != 1)
 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
...
@@ -28,25 +28,16 @@
 #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
 /*
-* Architectures where both 32- and 64-bit binaries can be executed
-* on 64-bit kernels need this. This keeps the structure format
-* uniform, and makes sure the wait_queue_token isn't too big to be
-* passed back down to the kernel.
-*
-* This assumes that on these architectures:
-* mode 32 bit 64 bit
-* -------------------------
-* int 32 bit 32 bit
-* long 32 bit 64 bit
-*
-* If so, 32-bit user-space code should be backwards compatible.
+* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+* back to the kernel via ioctl from userspace. On architectures where 32- and
+* 64-bit userspace binaries can be executed it's important that the size of
+* autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
+* do not break the binary ABI interface by changing the structure size.
 */
-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
-|| defined(__powerpc__) || defined(__s390__)
-typedef unsigned int autofs_wqt_t;
-#else
+#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
 typedef unsigned long autofs_wqt_t;
+#else
+typedef unsigned int autofs_wqt_t;
 #endif
 /* Packet types */
...
@@ -331,7 +331,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 return pid;
 out_unlock:
-spin_unlock(&pidmap_lock);
+spin_unlock_irq(&pidmap_lock);
 out_free:
 while (++i <= ns->level)
 free_pidmap(pid->numbers + i);
...
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 if (memcg) {
 s->memcg_params->memcg = memcg;
 s->memcg_params->root_cache = root_cache;
-}
+} else
+s->memcg_params->is_root_cache = true;
 return 0;
 }
...
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
 struct vm_area_struct * vma, * prev = NULL;
-unsigned int def_flags = 0;
 if (flags & MCL_FUTURE)
-def_flags = VM_LOCKED;
-current->mm->def_flags = def_flags;
+current->mm->def_flags |= VM_LOCKED;
+else
+current->mm->def_flags &= ~VM_LOCKED;
 if (flags == MCL_FUTURE)
 goto out;
...
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
 set_pageblock_migratetype(page, MIGRATE_CMA);
 __free_pages(page, pageblock_order);
 totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+if (PageHighMem(page))
+totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
...
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
 /* this is an hash collision with the temporary selected node. Choose
 * the one with the lowest address
 */
-if ((tmp_max == max) &&
+if ((tmp_max == max) && max_orig_node &&
 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
 goto out;
...
@@ -97,9 +97,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
 if (table == NULL)
 goto err_alloc;
-table[0].data = &net->ipv6.frags.high_thresh;
-table[1].data = &net->ipv6.frags.low_thresh;
-table[2].data = &net->ipv6.frags.timeout;
+table[0].data = &net->nf_frag.frags.timeout;
+table[1].data = &net->nf_frag.frags.low_thresh;
+table[2].data = &net->nf_frag.frags.high_thresh;
 }
 hdr = register_net_sysctl(net, "net/netfilter", table);
...
@@ -236,7 +236,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 /* We only allow helper re-assignment of the same sort since
 * we cannot reallocate the helper extension area.
 */
-if (help->helper != helper) {
+struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
+if (tmp && tmp->help != helper->help) {
 RCU_INIT_POINTER(help->helper, NULL);
 goto out;
 }
...
@@ -1782,6 +1782,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 if (nlh->nlmsg_flags & NLM_F_CREATE) {
 enum ip_conntrack_events events;
+if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
+return -EINVAL;
 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 &rtuple, u3);
 if (IS_ERR(ct))
...
@@ -3,8 +3,8 @@
 #
 menuconfig IP_SCTP
-tristate "The SCTP Protocol (EXPERIMENTAL)"
-depends on INET && EXPERIMENTAL
+tristate "The SCTP Protocol"
+depends on INET
 depends on IPV6 || IPV6=n
 select CRYPTO
 select CRYPTO_HMAC
...
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 */
 rcu_read_lock();
 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
+if (!laddr->valid)
 continue;
-if ((laddr->a.sa.sa_family == AF_INET6) &&
+if ((laddr->state == SCTP_ADDR_SRC) &&
+(laddr->a.sa.sa_family == AF_INET6) &&
 (scope <= sctp_scope(&laddr->a))) {
 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
 if (!baddr || (matchlen < bmatchlen)) {
...
@@ -774,6 +774,7 @@ void tipc_bclink_init(void)
 bcl->owner = &bclink->node;
 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+spin_lock_init(&bcbearer->bearer.lock);
 bcl->b_ptr = &bcbearer->bearer;
 bcl->state = WORKING_WORKING;
 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
...