commit c7c3ae25
Author: Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "Here are a few more powerpc fixes for 3.16

  There's a small series of 3 patches that fix saving/restoring MMUCR2
  when using KVM without which perf goes completely bonkers in the host
  system.  Another perf fix from Anton that's been rotting away in
  patchwork due to my poor eyesight, a couple of compile fixes, a little
  addition to the WSP removal by Michael (removing a bit more dead
  stuff) and a fix for an embarrassing regression with our soft irq
  masking"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/perf: Never program book3s PMCs with values >= 0x80000000
  powerpc: Disable RELOCATABLE for COMPILE_TEST with PPC64
  powerpc/perf: Clear MMCR2 when enabling PMU
  powerpc/perf: Add PPMU_ARCH_207S define
  powerpc/kvm: Remove redundant save of SIER AND MMCR2
  powerpc/powernv: Check for IRQHAPPENED before sleeping
  powerpc: Clean up MMU_FTRS_A2 and MMU_FTR_TYPE_3E
  powerpc/cell: Fix compilation with CONFIG_COREDUMP=n
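
Background for the MMCR2 patches above: on Power ISA v2.07S CPUs, MMCR2 carries per-counter freeze bits, so a stale value left behind (for example by a KVM guest) can make host counters look enabled while counting nothing. A minimal user-space sketch of that failure mode, with stubbed SPR accessors (fake_mmcr2, mtspr_mmcr2 and mfspr_mmcr2 are illustrative names, and the freeze-bit position is simplified):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_mmcr2; /* stand-in for the real SPR */

    static void mtspr_mmcr2(uint64_t v) { fake_mmcr2 = v; }
    static uint64_t mfspr_mmcr2(void) { return fake_mmcr2; }

    int main(void)
    {
        /* a guest (or earlier user of the PMU) leaves a freeze bit set */
        mtspr_mmcr2(1ull << 63);

        /* before the fix, the host re-enabled the PMU without touching
         * MMCR2, so counters looked enabled yet never counted */
        printf("stale MMCR2:   %016llx\n", (unsigned long long)mfspr_mmcr2());

        /* the fix: clear MMCR2 whenever the PMU is (re)enabled */
        mtspr_mmcr2(0);
        printf("cleared MMCR2: %016llx\n", (unsigned long long)mfspr_mmcr2());
        return 0;
    }

The power_pmu_enable() hunk further down does exactly this clearing on the real register.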
@@ -414,7 +414,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-	select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1017,7 @@ endmenu
 if PPC64
 config RELOCATABLE
 	bool "Build a relocatable kernel"
+	depends on !COMPILE_TEST
 	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running anywhere
......
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x	ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x	ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E	ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E		ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x	ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x	ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -106,13 +105,6 @@
 			MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 			MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2		MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-			MMU_FTR_USE_TLBIVAX_BCAST | \
-			MMU_FTR_LOCK_BCAST_INVAL | \
-			MMU_FTR_USE_TLBRSRV | \
-			MMU_FTR_USE_PAIRED_MAS | \
-			MMU_FTR_TLBIEL | \
-			MMU_FTR_16M_PAGE
 
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
......
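
With MMU_FTR_TYPE_3E gone, MMU_FTR_TYPE_47x moves down from 0x40 to 0x20 so the type bits stay packed. Each type must remain a distinct power of two because feature checks are plain bit masks, as in this stand-alone sketch (mmu_has_feature mirrors the kernel helper's name; the feature word and its contents here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* values as they stand after this change */
    #define MMU_FTR_TYPE_40x   0x00000004u
    #define MMU_FTR_TYPE_44x   0x00000008u
    #define MMU_FTR_TYPE_FSL_E 0x00000010u
    #define MMU_FTR_TYPE_47x   0x00000020u /* takes the slot TYPE_3E vacated */

    static uint32_t mmu_features = MMU_FTR_TYPE_47x; /* the detected MMU type */

    static bool mmu_has_feature(uint32_t feature)
    {
        return (mmu_features & feature) != 0;
    }

    int main(void)
    {
        printf("47x: %d  FSL_E: %d\n",
               mmu_has_feature(MMU_FTR_TYPE_47x),
               mmu_has_feature(MMU_FTR_TYPE_FSL_E));
        return 0;
    }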
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB		0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
......
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
 	li	r3,1
-	li	r4,0
+	li	r4,1
 	b	power7_powersave_common
 	/* No return */
......
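
The one-instruction change above makes power7_sleep pass r4=1, which per the patch title ("Check for IRQHAPPENED before sleeping") tells power7_powersave_common to bail out if an interrupt became pending while soft-masked, rather than sleeping through it. A rough C rendering of that intent (struct paca_demo, safe_to_sleep and the flag value are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_HAPPENED 0x01u /* illustrative flag value, not the kernel's */

    struct paca_demo {
        uint8_t irq_happened; /* set when an IRQ arrived while soft-masked */
    };

    /* r4 in the assembly plays the role of check_irq here: when set,
     * refuse to enter the power-save state if an IRQ is already pending */
    static bool safe_to_sleep(const struct paca_demo *paca, bool check_irq)
    {
        return !(check_irq && (paca->irq_happened & IRQ_HAPPENED));
    }

    int main(void)
    {
        struct paca_demo p = { .irq_happened = IRQ_HAPPENED };
        printf("sleep with pending irq? %s\n",
               safe_to_sleep(&p, true) ? "yes" : "no");
        return 0;
    }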
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-	mfspr	r9, SPRN_SIER
-	std	r8, HSTATE_MMCR + 40(r13)
-	std	r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 /*
......
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
 	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
 		first_context = 1;
 		last_context = 65535;
-	} else
-#ifdef CONFIG_PPC_BOOK3E_MMU
-	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
-		u32 mmucfg = mfspr(SPRN_MMUCFG);
-		u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
-				>> MMUCFG_PIDSIZE_SHIFT;
-		first_context = 1;
-		last_context = (1UL << (pid_bits + 1)) - 1;
-	} else
-#endif
-	{
+	} else {
 		first_context = 1;
 		last_context = 255;
 	}
......
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
 	 * check that the PMU supports EBB, meaning those that don't can still
 	 * use bit 63 of the event code for something else if they wish.
 	 */
-	return (ppmu->flags & PPMU_EBB) &&
+	return (ppmu->flags & PPMU_ARCH_207S) &&
 		((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
 	if (ppmu->flags & PPMU_HAS_SIER)
 		sier = mfspr(SPRN_SIER);
 
-	if (ppmu->flags & PPMU_EBB) {
+	if (ppmu->flags & PPMU_ARCH_207S) {
 		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
 			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
 		pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	local64_add(delta, &event->count);
-	local64_sub(delta, &event->hw.period_left);
+
+	/*
+	 * A number of places program the PMC with (0x80000000 - period_left).
+	 * We never want period_left to be less than 1 because we will program
+	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
+	 * roll around to 0 before taking an exception. We have seen this
+	 * on POWER8.
+	 *
+	 * To fix this, clamp the minimum value of period_left to 1.
+	 */
+	do {
+		prev = local64_read(&event->hw.period_left);
+		val = prev - delta;
+		if (val < 1)
+			val = 1;
+	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
@@ -1300,6 +1315,9 @@
 
 	write_mmcr0(cpuhw, mmcr0);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	/*
 	 * Enable instruction sampling if necessary
 	 */
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
 
 	if (has_branch_stack(event)) {
 		/* PMU has BHRB enabled */
-		if (!(ppmu->flags & PPMU_BHRB))
+		if (!(ppmu->flags & PPMU_ARCH_207S))
 			return -EOPNOTSUPP;
 	}
......
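
To see why the clamp added to power_pmu_read above matters, work through the arithmetic when period_left has gone negative: the value written to the PMC already has bit 31 set, so an edge-detected counter must wrap all the way around before the next interrupt fires. A self-contained illustration (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t period_left = -5; /* can go negative after the subtraction */
        uint32_t pmc = (uint32_t)(0x80000000u - period_left); /* 0x80000005 */

        /* the interrupt fires when bit 31 goes 0 -> 1; starting with it
         * already set, the counter must wrap past zero and climb back up */
        uint64_t events = (0x100000000ull - pmc) + 0x80000000ull;

        printf("programmed PMC: 0x%08x\n", pmc);
        printf("events before next interrupt: %llu (~4.3 billion, not 5)\n",
               (unsigned long long)events);

        /* with the clamp, period_left < 1 becomes 1, keeping pmc < 2^31 */
        printf("clamped PMC: 0x%08x\n", 0x80000000u - 1u);
        return 0;
    }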
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
 	.get_constraint		= power8_get_constraint,
 	.get_alternatives	= power8_get_alternatives,
 	.disable_pmc		= power8_disable_pmc,
-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
 	.cache_events		= &power8_cache_events,
......
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
 	return ret;
 }
 
+#ifdef CONFIG_COREDUMP
 int elf_coredump_extra_notes_size(void)
 {
 	struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 
 	return ret;
 }
+#endif
 
 void notify_spus_active(void)
 {
......
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
 
 # magic for the trace events
 CFLAGS_sched.o := -I$(src)
......
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 struct spufs_calls spufs_calls = {
 	.create_thread = do_spu_create,
 	.spu_run = do_spu_run,
-	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
-	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
 	.notify_spus_active = do_notify_spus_active,
 	.owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
 };
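
The pattern in this last hunk, repeating the same #ifdef CONFIG_COREDUMP guard at the initializer, around the functions it references, and in the Makefile, is what lets a CONFIG_COREDUMP=n build link without stub functions. A compilable miniature of the idea (every name here is hypothetical):

    #include <stdio.h>

    #define CONFIG_COREDUMP /* comment out to compile the feature away */

    struct calls_demo {
        long (*run)(int fd);
    #ifdef CONFIG_COREDUMP
        int (*coredump_notes_size)(void);
    #endif
    };

    static long demo_run(int fd) { (void)fd; return 0; }

    #ifdef CONFIG_COREDUMP
    static int demo_notes_size(void) { return 42; }
    #endif

    static struct calls_demo calls = {
        .run = demo_run,
    #ifdef CONFIG_COREDUMP
        .coredump_notes_size = demo_notes_size,
    #endif
    };

    int main(void)
    {
    #ifdef CONFIG_COREDUMP
        printf("notes size: %d\n", calls.coredump_notes_size());
    #else
        printf("coredump support compiled out\n");
    #endif
        return 0;
    }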