diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bd6dd6ed3a9f1c30b4ae508057a264348b117b3f..fefe7c8bf05f54725b2edb7acbcc801c95c6431c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -414,7 +414,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-	select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1017,7 @@ endmenu
 if PPC64
 config RELOCATABLE
 	bool "Build a relocatable kernel"
+	depends on !COMPILE_TEST
 	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running anywhere
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f8d1d6dcf7db63c5947aa2be4fa22564187d9953..e61f24ed4e65223290901ee23fe79d3354f82196 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x	ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x	ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E	ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E		ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x	ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x	ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -106,13 +105,6 @@
 			MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 			MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2		MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-			MMU_FTR_USE_TLBIVAX_BCAST | \
-			MMU_FTR_LOCK_BCAST_INVAL | \
-			MMU_FTR_USE_TLBRSRV | \
-			MMU_FTR_USE_PAIRED_MAS | \
-			MMU_FTR_TLBIEL | \
-			MMU_FTR_16M_PAGE
 
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9ed737146dbb3cf5f2253697cbdc96ab8fb07077..b3e936027b26353a82ddf627306772d31e972739 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB		0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S		0x00000080 /* PMU is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d4b5c84cd9654b48498f9d1bd9d519..5cf3d367190df77cdde609d5a926ce3269b20a4a 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
 	li	r3,1
-	li	r4,0
+	li	r4,1
 	b	power7_powersave_common
 	/* No return */
 
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 8c86422a1e37a706627dac1ac49ea19ed89a0ee7..731be7478b27ddbec69e52e61bc94edd5630c91c 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-	mfspr	r9, SPRN_SIER
-	std	r8, HSTATE_MMCR + 40(r13)
-	std	r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 	/*
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index af3d78e193026a6df95f6b4946fdf5a74236a864..928ebe79668b95e6b60fe9819276c1a89e81053d 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
 	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
 		first_context = 1;
 		last_context = 65535;
-	} else
-#ifdef CONFIG_PPC_BOOK3E_MMU
-	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
-		u32 mmucfg = mfspr(SPRN_MMUCFG);
-		u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
-				>> MMUCFG_PIDSIZE_SHIFT;
-		first_context = 1;
-		last_context = (1UL << (pid_bits + 1)) - 1;
-	} else
-#endif
-	{
+	} else {
 		first_context = 1;
 		last_context = 255;
 	}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b5473bdf5ae880d214bb4076cee2c3d..6b0641c3f03f097c4aef760c9aa5fa307d2bcd5b 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
 	 * check that the PMU supports EBB, meaning those that don't can still
 	 * use bit 63 of the event code for something else if they wish.
 	 */
-	return (ppmu->flags & PPMU_EBB) &&
+	return (ppmu->flags & PPMU_ARCH_207S) &&
 	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
 	if (ppmu->flags & PPMU_HAS_SIER)
 		sier = mfspr(SPRN_SIER);
 
-	if (ppmu->flags & PPMU_EBB) {
+	if (ppmu->flags & PPMU_ARCH_207S) {
 		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
 			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
 		pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	local64_add(delta, &event->count);
-	local64_sub(delta, &event->hw.period_left);
+
+	/*
+	 * A number of places program the PMC with (0x80000000 - period_left).
+	 * We never want period_left to be less than 1 because we will program
+	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
+	 * roll around to 0 before taking an exception. We have seen this
+	 * on POWER8.
+	 *
+	 * To fix this, clamp the minimum value of period_left to 1.
+	 */
+	do {
+		prev = local64_read(&event->hw.period_left);
+		val = prev - delta;
+		if (val < 1)
+			val = 1;
+	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
@@ -1300,6 +1315,9 @@ static void power_pmu_enable(struct pmu *pmu)
 
 	write_mmcr0(cpuhw, mmcr0);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	/*
 	 * Enable instruction sampling if necessary
 	 */
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
 
 	if (has_branch_stack(event)) {
 		/* PMU has BHRB enabled */
-		if (!(ppmu->flags & PPMU_BHRB))
+		if (!(ppmu->flags & PPMU_ARCH_207S))
 			return -EOPNOTSUPP;
 	}
 
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index fe2763b6e039cfc5773ba8255731a3f7d7228d78..639cd9156585f797a82c138eee2474f3eda84242 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
 	.get_constraint		= power8_get_constraint,
 	.get_alternatives	= power8_get_alternatives,
 	.disable_pmc		= power8_disable_pmc,
-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
 	.cache_events		= &power8_cache_events,
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 38e0a1a5cec34d500475ff3aa22e08254fe82174..5e6e0bad6db6ca2e940c40a648cc81eb130faaeb 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
 	return ret;
 }
 
+#ifdef CONFIG_COREDUMP
 int elf_coredump_extra_notes_size(void)
 {
 	struct spufs_calls *calls;
@@ -142,6 +143,7 @@
 
 	return ret;
 }
+#endif
 
 void notify_spus_active(void)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b9d5d678aa44ccf2481cef76586c8dbb1f3c0ab3..52a7d2596d30fe285e309c8694490d0b695add33 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,9 @@
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
 
 # magic for the trace events
 CFLAGS_sched.o := -I$(src)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index b045fdda484529f31a5a2ee56766ac13c7b4c903..a87200a535fae99065a5aafb27775f736c31aa5c 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 struct spufs_calls spufs_calls = {
 	.create_thread = do_spu_create,
 	.spu_run = do_spu_run,
-	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
-	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
 	.notify_spus_active = do_notify_spus_active,
 	.owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
};
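
The power_pmu_read() change above boils down to a compare-and-exchange retry loop that never lets period_left drop below 1, so the value later programmed into the PMC (0x80000000 - period_left) stays below 0x80000000. Below is a minimal user-space sketch of that pattern, not part of the patch: GCC atomic builtins stand in for the kernel's local64_read()/local64_cmpxchg() helpers, and the names (period_left, account_delta) are illustrative only.

#include <stdint.h>
#include <stdio.h>

static int64_t period_left = 5;		/* stand-in for event->hw.period_left */

static void account_delta(int64_t delta)
{
	int64_t prev, val;

	do {
		prev = __atomic_load_n(&period_left, __ATOMIC_RELAXED);
		val = prev - delta;
		if (val < 1)
			val = 1;	/* clamp, as the patch does, so the PMC never wraps */
	} while (!__atomic_compare_exchange_n(&period_left, &prev, val, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
	account_delta(100);		/* would underflow, so it is clamped to 1 */
	printf("period_left = %lld\n", (long long)period_left);
	return 0;
}

The retry loop mirrors the kernel code: if another updater changes period_left between the read and the compare-and-exchange, the exchange fails and the subtraction is recomputed against the fresh value.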