Commit b6fedfd2 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/booke: Fix breakpoint/watchpoint one-shot behavior
  powerpc: Reduce printk from pseries_mach_cpu_die()
  powerpc: Move checks in pseries_mach_cpu_die()
  powerpc: Reset kernel stack on cpu online from cede state
  powerpc: Fix G5 thermal shutdown
  powerpc/pseries: Pass CPPR value to H_XIRR hcall
  powerpc/booke: Fix a couple typos in the advanced ptrace code
  powerpc: Fix SMP build with disabled CPU hotplugging.
  powerpc: Dynamically allocate pacas
  powerpc/perf: e500 support
  powerpc/perf: Build callchain code regardless of hardware event support.
  powerpc/cpm2: Checkpatch cleanup
  powerpc/86xx: Renaming following split of GE Fanuc joint venture
  powerpc/86xx: Convert gef_pic_lock to raw_spinlock
  powerpc/qe: Convert qe_ic_lock to raw_spinlock
  powerpc/82xx: Convert pci_pic_lock to raw_spinlock
  powerpc/85xx: Convert socrates_fpga_pic_lock to raw_spinlock
 /*
- * GE Fanuc PPC9A Device Tree Source
+ * GE PPC9A Device Tree Source
  *
- * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
...
 /*
- * GE Fanuc SBC310 Device Tree Source
+ * GE SBC310 Device Tree Source
  *
- * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
...
 /*
- * GE Fanuc SBC610 Device Tree Source
+ * GE SBC610 Device Tree Source
  *
- * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
...
@@ -14,6 +14,9 @@
 #define _ASM_POWERPC_PACA_H
 #ifdef __KERNEL__
 
+#ifdef CONFIG_PPC64
+
+#include <linux/init.h>
 #include <asm/types.h>
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
@@ -145,8 +148,19 @@ struct paca_struct {
 #endif
 };
 
-extern struct paca_struct paca[];
-extern void initialise_pacas(void);
+extern struct paca_struct *paca;
+extern __initdata struct paca_struct boot_paca;
+extern void initialise_paca(struct paca_struct *new_paca, int cpu);
+
+extern void allocate_pacas(void);
+extern void free_unused_pacas(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void allocate_pacas(void) { };
+static inline void free_unused_pacas(void) { };
+
+#endif /* CONFIG_PPC64 */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PACA_H */
 /*
- * Performance event support - PowerPC-specific definitions.
+ * Performance event support - hardware-specific disambiguation
  *
- * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ * For now this is a compile-time decision, but eventually it should be
+ * runtime. This would allow multiplatform perf event support for e300 (fsl
+ * embedded perf counters) plus server/classic, and would accommodate
+ * devices other than the core which provide their own performance counters.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
-#include <linux/types.h>
-#include <asm/hw_irq.h>
-
-#define MAX_HWEVENTS		8
-#define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
-
-/*
- * This struct provides the constants and functions needed to
- * describe the PMU on a particular POWER-family CPU.
- */
-struct power_pmu {
-	const char	*name;
-	int		n_counter;
-	int		max_alternatives;
-	unsigned long	add_fields;
-	unsigned long	test_adder;
-	int		(*compute_mmcr)(u64 events[], int n_ev,
-				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
-				unsigned long *valp);
-	int		(*get_alternatives)(u64 event_id, unsigned int flags,
-				u64 alt[]);
-	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event_id);
-	u32		flags;
-	int		n_generic;
-	int		*generic_events;
-	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
-			       [PERF_COUNT_HW_CACHE_OP_MAX]
-			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
-};
-
-/*
- * Values for power_pmu.flags
- */
-#define PPMU_LIMITED_PMC5_6	1	/* PMC5/6 have limited function */
-#define PPMU_ALT_SIPR		2	/* uses alternate posn for SIPR/HV */
-
-/*
- * Values for flags to get_alternatives()
- */
-#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
-#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
-#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
-
-extern int register_power_pmu(struct power_pmu *);
-
-struct pt_regs;
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-
-#define PERF_EVENT_INDEX_OFFSET	1
-
-/*
- * Only override the default definitions in include/linux/perf_event.h
- * if we have hardware PMU support.
- */
 #ifdef CONFIG_PPC_PERF_CTRS
-#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#include <asm/perf_event_server.h>
 #endif
 
-/*
- * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event_id and
- * other events.
- *
- * The value and mask are divided up into (non-overlapping) bitfields
- * of three different types:
- *
- * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event_id. For a
- * select field, the mask contains 1s in every bit of the field, and
- * the value contains a unique value for each possible setting of the
- * MMCR* bits. The constraint checking code will ensure that two events
- * that set the same field in their masks have the same value in their
- * value dwords.
- *
- * Add field: this expresses the constraint that there can be at most
- * N events in a particular class. A field of k bits can be used for
- * N <= 2^(k-1) - 1. The mask has the most significant bit of the field
- * set (and the other bits 0), and the value has only the least significant
- * bit of the field set. In addition, the 'add_fields' and 'test_adder'
- * in the struct power_pmu for this processor come into play. The
- * add_fields value contains 1 in the LSB of the field, and the
- * test_adder contains 2^(k-1) - 1 - N in the field.
- *
- * NAND field: this expresses the constraint that you may not have events
- * in all of a set of classes. (For example, on PPC970, you can't select
- * events from the FPU, ISU and IDU simultaneously, although any two are
- * possible.) For N classes, the field is N+1 bits wide, and each class
- * is assigned one bit from the least-significant N bits. The mask has
- * only the most-significant bit set, and the value has only the bit
- * for the event_id's class set. The test_adder has the least significant
- * bit set in the field.
- *
- * If an event_id is not subject to the constraint expressed by a particular
- * field, then it will have 0 in both the mask and value for that field.
- */
+#ifdef CONFIG_FSL_EMB_PERF_EVENT
+#include <asm/perf_event_fsl_emb.h>
+#endif
/*
* Performance event support - Freescale embedded specific definitions.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/hw_irq.h>
#define MAX_HWEVENTS 4
/* event flags */
#define FSL_EMB_EVENT_VALID 1
#define FSL_EMB_EVENT_RESTRICTED 2
/* upper half of event flags is PMLCb */
#define FSL_EMB_EVENT_THRESHMUL 0x0000070000000000ULL
#define FSL_EMB_EVENT_THRESH 0x0000003f00000000ULL
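/* Illustrative only (made-up field values): a raw id such as
 * ((u64)2 << 40) | ((u64)8 << 32) | 77 would combine event 77 with a
 * threshold of 8 and a threshold-multiplier field of 2, assuming
 * event 77 accepts thresholding. */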
struct fsl_emb_pmu {
const char *name;
int n_counter; /* total number of counters */
/*
* The number of contiguous counters starting at zero that
* can hold restricted events, or zero if there are no
* restricted events.
*
* This isn't a very flexible method of expressing constraints,
* but it's very simple and is adequate for existing chips.
*/
int n_restricted;
/* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */
u64 (*xlate_event)(u64 event_id);
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
int register_fsl_emb_pmu(struct fsl_emb_pmu *);
/*
* Performance event support - PowerPC classic/server specific definitions.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/hw_irq.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
/*
* This struct provides the constants and functions needed to
* describe the PMU on a particular POWER-family CPU.
*/
struct power_pmu {
const char *name;
int n_counter;
int max_alternatives;
unsigned long add_fields;
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], unsigned long mmcr[]);
int (*get_constraint)(u64 event_id, unsigned long *mskp,
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
/*
* Values for power_pmu.flags
*/
#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
/*
* Values for flags to get_alternatives()
*/
#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
#define PERF_EVENT_INDEX_OFFSET 1
/*
* Only override the default definitions in include/linux/perf_event.h
* if we have hardware PMU support.
*/
#ifdef CONFIG_PPC_PERF_CTRS
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif
/*
* The power_pmu.get_constraint function returns a 32/64-bit value and
* a 32/64-bit mask that express the constraints between this event_id and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
* of three different types:
*
* Select field: this expresses the constraint that some set of bits
* in MMCR* needs to be set to a specific value for this event_id. For a
* select field, the mask contains 1s in every bit of the field, and
* the value contains a unique value for each possible setting of the
* MMCR* bits. The constraint checking code will ensure that two events
* that set the same field in their masks have the same value in their
* value dwords.
*
* Add field: this expresses the constraint that there can be at most
* N events in a particular class. A field of k bits can be used for
* N <= 2^(k-1) - 1. The mask has the most significant bit of the field
* set (and the other bits 0), and the value has only the least significant
* bit of the field set. In addition, the 'add_fields' and 'test_adder'
* in the struct power_pmu for this processor come into play. The
* add_fields value contains 1 in the LSB of the field, and the
* test_adder contains 2^(k-1) - 1 - N in the field.
*
* NAND field: this expresses the constraint that you may not have events
* in all of a set of classes. (For example, on PPC970, you can't select
* events from the FPU, ISU and IDU simultaneously, although any two are
* possible.) For N classes, the field is N+1 bits wide, and each class
* is assigned one bit from the least-significant N bits. The mask has
* only the most-significant bit set, and the value has only the bit
* for the event_id's class set. The test_adder has the least significant
* bit set in the field.
*
* If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
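The add-field encoding above is easiest to see with concrete numbers. The
following stand-alone sketch (not kernel code; the field width k = 4, the
limit N = 3 and the field position are made-up values) shows how the carry
into the mask bit flags a class holding more than N events:

#include <stdio.h>

int main(void)
{
	const int k = 4, N = 3;			/* k-bit field, at most N events */
	unsigned long add_fields = 1;		/* 1 in the LSB of the field */
	unsigned long test_adder = (1UL << (k - 1)) - 1 - N;	/* 2^(k-1)-1-N */
	unsigned long mask = 1UL << (k - 1);	/* MSB of the field */

	for (int n = 1; n <= 5; n++) {
		unsigned long sum = n * add_fields;	/* n events in the class */
		int bad = ((sum + test_adder) & mask) != 0;
		printf("%d events: %s\n", n, bad ? "violates" : "ok");
	}
	return 0;	/* prints ok for 1..3 events, violates for 4 and 5 */
}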
@@ -421,8 +421,8 @@
 /* Bit definitions related to the DBCR2. */
 #define DBCR2_DAC1US	0xC0000000	/* Data Addr Cmp 1 Sup/User */
 #define DBCR2_DAC1ER	0x30000000	/* Data Addr Cmp 1 Eff/Real */
-#define DBCR2_DAC2US	0x00000000	/* Data Addr Cmp 2 Sup/User */
-#define DBCR2_DAC2ER	0x00000000	/* Data Addr Cmp 2 Eff/Real */
+#define DBCR2_DAC2US	0x0C000000	/* Data Addr Cmp 2 Sup/User */
+#define DBCR2_DAC2ER	0x03000000	/* Data Addr Cmp 2 Eff/Real */
 #define DBCR2_DAC12M	0x00800000	/* DAC 1-2 range enable */
 #define DBCR2_DAC12MM	0x00400000	/* DAC 1-2 Mask mode */
 #define DBCR2_DAC12MX	0x00C00000	/* DAC 1-2 range eXclusive */
...
@@ -31,7 +31,7 @@
 #define PMLCA_FCM0	0x08000000	/* Freeze when PMM==0 */
 #define PMLCA_CE	0x04000000	/* Condition Enable */
 
-#define PMLCA_EVENT_MASK 0x007f0000	/* Event field */
+#define PMLCA_EVENT_MASK 0x00ff0000	/* Event field */
 #define PMLCA_EVENT_SHIFT	16
 
 #define PMRN_PMLCB0	0x110	/* PM Local Control B0 */
...
@@ -98,11 +98,16 @@ obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o perf_callchain.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
+
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
+obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o
+obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
...
@@ -1808,7 +1808,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 64,
 		.dcache_bsize		= 64,
 		.num_pmcs		= 4,
-		.oprofile_cpu_type	= "ppc/e500",	/* xxx - galak, e500mc? */
+		.oprofile_cpu_type	= "ppc/e500mc",
 		.oprofile_type		= PPC_OPROFILE_FSL_EMB,
 		.cpu_setup		= __setup_cpu_e500mc,
 		.machine_check		= machine_check_e500,
...
/*
* Performance counter support for e500 family processors.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
/*
* Map of generic hardware event types to hardware events
* Zero if unsupported
*/
static int e500_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 1,
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
[PERF_COUNT_HW_BRANCH_MISSES] = 15,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
/*
* D-cache misses are not split into read/write/prefetch;
* use raw event 41.
*/
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 27, 0 },
[C(OP_WRITE)] = { 28, 0 },
[C(OP_PREFETCH)] = { 29, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 2, 60 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0, 0 },
},
/*
* Assuming LL means L2, it's not a good match for this model.
* It allocates only on L1 castout or explicit prefetch, and
* does not have separate read/write events (but it does have
* separate instruction/data events).
*/
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0, 0 },
},
/*
* There are data/instruction MMU misses, but that's a miss on
* the chip's internal level-one TLB which is probably not
* what the user wants. Instead, unified level-two TLB misses
* are reported here.
*/
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 26, 66 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 12, 15 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
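/* e500v1/v2 expose 128 events; init_e500_pmu() below raises this to 256
 * for e500mc */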
static int num_events = 128;
/* Upper half of event id is PMLCb, for threshold events */
static u64 e500_xlate_event(u64 event_id)
{
u32 event_low = (u32)event_id;
u64 ret;
if (event_low >= num_events)
return 0;
ret = FSL_EMB_EVENT_VALID;
if (event_low >= 76 && event_low <= 81) {
ret |= FSL_EMB_EVENT_RESTRICTED;
ret |= event_id &
(FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);
} else if (event_id &
(FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) {
/* Threshold requested on non-threshold event */
return 0;
}
return ret;
}
static struct fsl_emb_pmu e500_pmu = {
.name = "e500 family",
.n_counter = 4,
.n_restricted = 2,
.xlate_event = e500_xlate_event,
.n_generic = ARRAY_SIZE(e500_generic_events),
.generic_events = e500_generic_events,
.cache_events = &e500_cache_events,
};
static int init_e500_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
num_events = 256;
else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
return -ENODEV;
return register_fsl_emb_pmu(&e500_pmu);
}
arch_initcall(init_e500_pmu);
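A PMU registered this way is reached through the generic perf syscall. A
hypothetical user-space sketch (raw event 41, the data L1 reload count from
the table above; error handling mostly trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 41;	/* e500 "data L1 cache reloads" */

	/* count this process on any cpu; no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload under test here ... */

	read(fd, &count, sizeof(count));
	printf("raw event 41: %lld\n", count);
	close(fd);
	return 0;
}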
@@ -219,7 +219,8 @@ generic_secondary_common_init:
 	 * physical cpu id in r24, we need to search the pacas to find
 	 * which logical id maps to our physical one.
 	 */
-	LOAD_REG_ADDR(r13, paca)	/* Get base vaddr of paca array	*/
+	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		*/
+	ld	r13,0(r13)		/* Get base vaddr of paca array	*/
 	li	r5,0			/* logical cpu id		*/
 1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	*/
 	cmpw	r6,r24			/* Compare to our id		*/
@@ -536,7 +537,8 @@ _GLOBAL(pmac_secondary_start)
 	mtmsrd	r3			/* RI on */
 
 	/* Set up a paca value for this processor. */
-	LOAD_REG_ADDR(r4,paca)		/* Get base vaddr of paca array	*/
+	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
+	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
 	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
 	add	r13,r13,r4		/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
@@ -615,6 +617,17 @@ _GLOBAL(start_secondary_prolog)
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
 	bl	.start_secondary
 	b	.
+/*
+ * Reset stack pointer and call start_secondary
+ * to continue with online operation when woken up
+ * from cede in cpu offline.
+ */
+_GLOBAL(start_secondary_resume)
+	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
+	li	r3,0
+	std	r3,0(r1)		/* Zero the stack frame pointer	*/
+	bl	.start_secondary
+	b	.
 #endif
 
 /*
...
@@ -9,11 +9,15 @@
 #include <linux/threads.h>
 #include <linux/module.h>
+#include <linux/lmb.h>
 
+#include <asm/firmware.h>
 #include <asm/lppaca.h>
 #include <asm/paca.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/iseries/lpar_map.h>
+#include <asm/iseries/hv_types.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -70,37 +74,82 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
  * processors. The processor VPD array needs one entry per physical
  * processor (not thread).
  */
-struct paca_struct paca[NR_CPUS];
+struct paca_struct *paca;
 EXPORT_SYMBOL(paca);
 
-void __init initialise_pacas(void)
-{
-	int cpu;
+struct paca_struct boot_paca;
 
+void __init initialise_paca(struct paca_struct *new_paca, int cpu)
+{
 	/* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
 	 * of the TOC can be addressed using a single machine instruction.
 	 */
 	unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
 
-	/* Can't use for_each_*_cpu, as they aren't functional yet */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		struct paca_struct *new_paca = &paca[cpu];
-
 #ifdef CONFIG_PPC_BOOK3S
 	new_paca->lppaca_ptr = &lppaca[cpu];
 #else
 	new_paca->kernel_pgd = swapper_pg_dir;
 #endif
 	new_paca->lock_token = 0x8000;
 	new_paca->paca_index = cpu;
 	new_paca->kernel_toc = kernel_toc;
 	new_paca->kernelbase = (unsigned long) _stext;
 	new_paca->kernel_msr = MSR_KERNEL;
 	new_paca->hw_cpu_id = 0xffff;
 	new_paca->__current = &init_task;
 #ifdef CONFIG_PPC_STD_MMU_64
 	new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 #endif /* CONFIG_PPC_STD_MMU_64 */
-	}
+}
+
+static int __initdata paca_size;
+
+void __init allocate_pacas(void)
+{
+	int nr_cpus, cpu, limit;
+
+	/*
+	 * We can't take SLB misses on the paca, and we want to access them
+	 * in real mode, so allocate them within the RMA and also within
+	 * the first segment. On iSeries they must be within the area mapped
+	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
+	 */
+	limit = min(0x10000000ULL, lmb.rmo_size);
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		limit = min(limit, HvPagesToMap * HVPAGESIZE);
+
+	nr_cpus = NR_CPUS;
+	/* On iSeries we know we can never have more than 64 cpus */
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		nr_cpus = min(64, nr_cpus);
+
+	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
+
+	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+	memset(paca, 0, paca_size);
+
+	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
+		paca_size, nr_cpus, paca);
+
+	/* Can't use for_each_*_cpu, as they aren't functional yet */
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		initialise_paca(&paca[cpu], cpu);
+}
+
+void __init free_unused_pacas(void)
+{
+	int new_size;
+
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+
+	if (new_size >= paca_size)
+		return;
+
+	lmb_free(__pa(paca) + new_size, paca_size - new_size);
+
+	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
+		paca_size - new_size);
+
+	paca_size = new_size;
 }
/*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = atomic64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
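	/* e.g. prev = 0xfffffff0 and val = 0x10 gives delta = 0x20,
	 * so the masking stays correct across 32-bit rollover */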
delta = (val - prev) & 0xfffffffful;
atomic64_add(delta, &event->count);
atomic64_sub(delta, &event->hw.period_left);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
void hw_perf_disable(void)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
void hw_perf_enable(void)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* perf must be disabled, context locked on entry */
static int fsl_emb_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters from top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
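	/* Program the counter to 0x80000000 - left so that its sign bit,
	 * and with it the overflow interrupt, fires after 'left' events */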
if (event->hw.sample_period) {
s64 left = atomic64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
atomic64_set(&event->hw.prev_count, val);
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
return ret;
}
/* perf must be disabled, context locked on entry */
static void fsl_emb_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
put_cpu_var(cpu_hw_events);
}
/*
 * Re-enable interrupts on an event after they were throttled
* because they were coming too fast.
*
* Context is locked on entry, but perf is not disabled.
*/
static void fsl_emb_pmu_unthrottle(struct perf_event *event)
{
s64 val, left;
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
local_irq_save(flags);
perf_disable();
fsl_emb_pmu_read(event);
left = event->hw.sample_period;
event->hw.last_period = left;
val = 0;
if (left < 0x80000000L)
val = 0x80000000L - left;
write_pmc(event->hw.idx, val);
atomic64_set(&event->hw.prev_count, val);
atomic64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
perf_enable();
local_irq_restore(flags);
}
static struct pmu fsl_emb_pmu = {
.enable = fsl_emb_pmu_enable,
.disable = fsl_emb_pmu_disable,
.read = fsl_emb_pmu_read,
.unthrottle = fsl_emb_pmu_unthrottle,
};
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
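		/* add_unless refused, so num_events was 1: we may be the last
		 * user and must do the final decrement under the mutex */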
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return ERR_PTR(err);
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return ERR_PTR(-EINVAL);
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return ERR_PTR(-EINVAL);
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return ERR_PTR(-EINVAL);
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return ERR_PTR(-EINVAL);
}
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return ERR_PTR(-ENOTSUPP);
event->hw.last_period = event->hw.sample_period;
atomic64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
if (err)
return ERR_PTR(err);
return &fsl_emb_pmu;
}
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
/* we don't have to worry about interrupts here */
prev = atomic64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
atomic64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = atomic64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data = {
.period = event->hw.last_period,
};
if (perf_event_overflow(event, nmi, &data, regs)) {
/*
* Interrupts are coming too fast - throttle them
* by setting the event to 0, so it will be
* at least 2^30 cycles until the next interrupt
* (assuming each event counts at most 2 counts
* per cycle).
*/
val = 0;
left = ~0ULL >> 1;
}
}
write_pmc(event->hw.idx, val);
atomic64_set(&event->hw.prev_count, val);
atomic64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
}
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
} else {
/*
* Disabled counter is negative,
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
return 0;
}
@@ -43,6 +43,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/mmu.h>
+#include <asm/paca.h>
 #include <asm/pgtable.h>
 #include <asm/pci.h>
 #include <asm/iommu.h>
@@ -721,6 +722,8 @@ void __init early_init_devtree(void *params)
 	 * FIXME .. and the initrd too? */
 	move_device_tree();
 
+	allocate_pacas();
+
 	DBG("Scanning CPUs ...\n");
 
 	/* Retreive CPU related informations from the flat tree
...
@@ -940,7 +940,7 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 {
 	switch (slot) {
 	case 1:
-		if (child->thread.iac1 == 0)
+		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
@@ -952,7 +952,7 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 		child->thread.dbcr0 &= ~DBCR0_IAC1;
 		break;
 	case 2:
-		if (child->thread.iac2 == 0)
+		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
@@ -963,7 +963,7 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 		break;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	case 3:
-		if (child->thread.iac3 == 0)
+		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
@@ -975,7 +975,7 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 		child->thread.dbcr0 &= ~DBCR0_IAC3;
 		break;
 	case 4:
-		if (child->thread.iac4 == 0)
+		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
 			return -ENOENT;
 
 		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
@@ -1054,7 +1054,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 static int del_dac(struct task_struct *child, int slot)
 {
 	if (slot == 1) {
-		if (child->thread.dac1 == 0)
+		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
 			return -ENOENT;
 
 		child->thread.dac1 = 0;
@@ -1070,7 +1070,7 @@ static int del_dac(struct task_struct *child, int slot)
 		child->thread.dvc1 = 0;
 #endif
 	} else if (slot == 2) {
-		if (child->thread.dac1 == 0)
+		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
 			return -ENOENT;
 
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
...
@@ -36,6 +36,7 @@
 #include <linux/lmb.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
+#include <asm/paca.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/vdso_datapage.h>
@@ -493,6 +494,8 @@ void __init smp_setup_cpu_maps(void)
 	 * here will have to be reworked
 	 */
 	cpu_init_thread_core_maps(nthreads);
+
+	free_unused_pacas();
 }
 #endif /* CONFIG_SMP */
...
@@ -144,9 +144,9 @@ early_param("smt-enabled", early_smt_enabled);
 #endif /* CONFIG_SMP */
 
 /* Put the paca pointer into r13 and SPRG_PACA */
-void __init setup_paca(int cpu)
+static void __init setup_paca(struct paca_struct *new_paca)
 {
-	local_paca = &paca[cpu];
+	local_paca = new_paca;
 	mtspr(SPRN_SPRG_PACA, local_paca);
 #ifdef CONFIG_PPC_BOOK3E
 	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
@@ -176,14 +176,12 @@ void __init early_setup(unsigned long dt_ptr)
 {
 	/* -------- printk is _NOT_ safe to use here ! ------- */
 
-	/* Fill in any unititialised pacas */
-	initialise_pacas();
-
 	/* Identify CPU type */
 	identify_cpu(0, mfspr(SPRN_PVR));
 
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
-	setup_paca(0);
+	initialise_paca(&boot_paca, 0);
+	setup_paca(&boot_paca);
 
 	/* Initialize lockdep early or else spinlocks will blow */
 	lockdep_init();
@@ -203,7 +201,7 @@ void __init early_setup(unsigned long dt_ptr)
 	early_init_devtree(__va(dt_ptr));
 
 	/* Now we know the logical id of our boot cpu, setup the paca. */
-	setup_paca(boot_cpuid);
+	setup_paca(&paca[boot_cpuid]);
 
 	/* Fix up paca fields required for the boot cpu */
 	get_paca()->cpu_start = 1;
...
@@ -24,7 +24,7 @@
 #include "pq2.h"
 
-static DEFINE_SPINLOCK(pci_pic_lock);
+static DEFINE_RAW_SPINLOCK(pci_pic_lock);
 
 struct pq2ads_pci_pic {
 	struct device_node *node;
@@ -45,12 +45,12 @@ static void pq2ads_pci_mask_irq(unsigned int virq)
 	if (irq != -1) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&pci_pic_lock, flags);
+		raw_spin_lock_irqsave(&pci_pic_lock, flags);
 		setbits32(&priv->regs->mask, 1 << irq);
 		mb();
-		spin_unlock_irqrestore(&pci_pic_lock, flags);
+		raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
 	}
 }
@@ -62,9 +62,9 @@ static void pq2ads_pci_unmask_irq(unsigned int virq)
 	if (irq != -1) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&pci_pic_lock, flags);
+		raw_spin_lock_irqsave(&pci_pic_lock, flags);
 		clrbits32(&priv->regs->mask, 1 << irq);
-		spin_unlock_irqrestore(&pci_pic_lock, flags);
+		raw_spin_unlock_irqrestore(&pci_pic_lock, flags);
 	}
 }
...
@@ -50,7 +50,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
 
 #define socrates_fpga_irq_to_hw(virq)    ((unsigned int)irq_map[virq].hwirq)
 
-static DEFINE_SPINLOCK(socrates_fpga_pic_lock);
+static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
 
 static void __iomem *socrates_fpga_pic_iobase;
 static struct irq_host *socrates_fpga_pic_irq_host;
@@ -80,9 +80,9 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
 	if (i == 3)
 		return NO_IRQ;
 
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i));
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 	for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) {
 		if (cause >> (i + 16))
 			break;
@@ -116,12 +116,12 @@ static void socrates_fpga_pic_ack(unsigned int virq)
 	hwirq = socrates_fpga_irq_to_hw(virq);
 
 	irq_line = fpga_irqs[hwirq].irq_line;
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
 		& SOCRATES_FPGA_IRQ_MASK;
 	mask |= (1 << (hwirq + 16));
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 }
 
 static void socrates_fpga_pic_mask(unsigned int virq)
@@ -134,12 +134,12 @@ static void socrates_fpga_pic_mask(unsigned int virq)
 	hwirq = socrates_fpga_irq_to_hw(virq);
 
 	irq_line = fpga_irqs[hwirq].irq_line;
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
 		& SOCRATES_FPGA_IRQ_MASK;
 	mask &= ~(1 << hwirq);
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 }
 
 static void socrates_fpga_pic_mask_ack(unsigned int virq)
@@ -152,13 +152,13 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq)
 	hwirq = socrates_fpga_irq_to_hw(virq);
 
 	irq_line = fpga_irqs[hwirq].irq_line;
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
 		& SOCRATES_FPGA_IRQ_MASK;
 	mask &= ~(1 << hwirq);
 	mask |= (1 << (hwirq + 16));
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 }
 
 static void socrates_fpga_pic_unmask(unsigned int virq)
@@ -171,12 +171,12 @@ static void socrates_fpga_pic_unmask(unsigned int virq)
 	hwirq = socrates_fpga_irq_to_hw(virq);
 
 	irq_line = fpga_irqs[hwirq].irq_line;
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
 		& SOCRATES_FPGA_IRQ_MASK;
 	mask |= (1 << hwirq);
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 }
 
 static void socrates_fpga_pic_eoi(unsigned int virq)
@@ -189,12 +189,12 @@ static void socrates_fpga_pic_eoi(unsigned int virq)
 	hwirq = socrates_fpga_irq_to_hw(virq);
 
 	irq_line = fpga_irqs[hwirq].irq_line;
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
 		& SOCRATES_FPGA_IRQ_MASK;
 	mask |= (1 << (hwirq + 16));
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 }
 
 static int socrates_fpga_pic_set_type(unsigned int virq,
@@ -220,14 +220,14 @@ static int socrates_fpga_pic_set_type(unsigned int virq,
 	default:
 		return -EINVAL;
 	}
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG);
 	if (polarity)
 		mask |= (1 << hwirq);
 	else
 		mask &= ~(1 << hwirq);
 	socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 
 	return 0;
 }
@@ -314,14 +314,14 @@ void socrates_fpga_pic_init(struct device_node *pic)
 
 	socrates_fpga_pic_iobase = of_iomap(pic, 0);
 
-	spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
+	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0),
 			SOCRATES_FPGA_IRQ_MASK << 16);
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1),
 			SOCRATES_FPGA_IRQ_MASK << 16);
 	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2),
 			SOCRATES_FPGA_IRQ_MASK << 16);
-	spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
+	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
 
 	pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n");
 }
@@ -33,32 +33,32 @@ config MPC8610_HPCD
 	  This option enables support for the MPC8610 HPCD board.
 
 config GEF_PPC9A
-	bool "GE Fanuc PPC9A"
+	bool "GE PPC9A"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
-	  This option enables support for GE Fanuc's PPC9A.
+	  This option enables support for the GE PPC9A.
 
 config GEF_SBC310
-	bool "GE Fanuc SBC310"
+	bool "GE SBC310"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
-	  This option enables support for GE Fanuc's SBC310.
+	  This option enables support for the GE SBC310.
 
 config GEF_SBC610
-	bool "GE Fanuc SBC610"
+	bool "GE SBC610"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select HAS_RAPIDIO
 	help
-	  This option enables support for GE Fanuc's SBC610.
+	  This option enables support for the GE SBC610.
 
 endif
...
/* /*
* Driver for GE Fanuc's FPGA based GPIO pins * Driver for GE FPGA based GPIO
* *
* Author: Martyn Welch <martyn.welch@gefanuc.com> * Author: Martyn Welch <martyn.welch@ge.com>
* *
* 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
* *
* This file is licensed under the terms of the GNU General Public License * This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any * version 2. This program is licensed "as is" without any warranty of any
...@@ -164,6 +164,6 @@ static int __init gef_gpio_init(void) ...@@ -164,6 +164,6 @@ static int __init gef_gpio_init(void)
}; };
arch_initcall(gef_gpio_init); arch_initcall(gef_gpio_init);
MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver"); MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com"); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
/* /*
* Interrupt handling for GE Fanuc's FPGA based PIC * Interrupt handling for GE FPGA based PIC
* *
* Author: Martyn Welch <martyn.welch@gefanuc.com> * Author: Martyn Welch <martyn.welch@ge.com>
* *
* 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
* *
* This file is licensed under the terms of the GNU General Public License * This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any * version 2. This program is licensed "as is" without any warranty of any
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
static DEFINE_SPINLOCK(gef_pic_lock); static DEFINE_RAW_SPINLOCK(gef_pic_lock);
static void __iomem *gef_pic_irq_reg_base; static void __iomem *gef_pic_irq_reg_base;
static struct irq_host *gef_pic_irq_host; static struct irq_host *gef_pic_irq_host;
...@@ -118,11 +118,11 @@ static void gef_pic_mask(unsigned int virq) ...@@ -118,11 +118,11 @@ static void gef_pic_mask(unsigned int virq)
hwirq = gef_irq_to_hw(virq); hwirq = gef_irq_to_hw(virq);
spin_lock_irqsave(&gef_pic_lock, flags); raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask &= ~(1 << hwirq); mask &= ~(1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
spin_unlock_irqrestore(&gef_pic_lock, flags); raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
} }
static void gef_pic_mask_ack(unsigned int virq) static void gef_pic_mask_ack(unsigned int virq)
...@@ -141,11 +141,11 @@ static void gef_pic_unmask(unsigned int virq) ...@@ -141,11 +141,11 @@ static void gef_pic_unmask(unsigned int virq)
hwirq = gef_irq_to_hw(virq); hwirq = gef_irq_to_hw(virq);
spin_lock_irqsave(&gef_pic_lock, flags); raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask |= (1 << hwirq); mask |= (1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
spin_unlock_irqrestore(&gef_pic_lock, flags); raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
} }
static struct irq_chip gef_pic_chip = { static struct irq_chip gef_pic_chip = {
...@@ -199,7 +199,7 @@ void __init gef_pic_init(struct device_node *np) ...@@ -199,7 +199,7 @@ void __init gef_pic_init(struct device_node *np)
/* Map the devices registers into memory */ /* Map the devices registers into memory */
gef_pic_irq_reg_base = of_iomap(np, 0); gef_pic_irq_reg_base = of_iomap(np, 0);
spin_lock_irqsave(&gef_pic_lock, flags); raw_spin_lock_irqsave(&gef_pic_lock, flags);
/* Initialise everything as masked. */ /* Initialise everything as masked. */
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0); out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0);
...@@ -208,7 +208,7 @@ void __init gef_pic_init(struct device_node *np) ...@@ -208,7 +208,7 @@ void __init gef_pic_init(struct device_node *np)
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0); out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0); out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0);
spin_unlock_irqrestore(&gef_pic_lock, flags); raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
/* Map controller */ /* Map controller */
gef_pic_cascade_irq = irq_of_parse_and_map(np, 0); gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
......
/* /*
* GE Fanuc PPC9A board support * GE PPC9A board support
* *
* Author: Martyn Welch <martyn.welch@gefanuc.com> * Author: Martyn Welch <martyn.welch@ge.com>
* *
* Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the * under the terms of the GNU General Public License as published by the
...@@ -82,7 +82,7 @@ static void __init gef_ppc9a_setup_arch(void) ...@@ -82,7 +82,7 @@ static void __init gef_ppc9a_setup_arch(void)
} }
#endif #endif
printk(KERN_INFO "GE Fanuc Intelligent Platforms PPC9A 6U VME SBC\n"); printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
mpc86xx_smp_init(); mpc86xx_smp_init();
...@@ -151,7 +151,7 @@ static void gef_ppc9a_show_cpuinfo(struct seq_file *m) ...@@ -151,7 +151,7 @@ static void gef_ppc9a_show_cpuinfo(struct seq_file *m)
{ {
uint svid = mfspr(SPRN_SVR); uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(), seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(),
('A' + gef_ppc9a_get_board_rev())); ('A' + gef_ppc9a_get_board_rev()));
...@@ -235,7 +235,7 @@ static int __init declare_of_platform_devices(void) ...@@ -235,7 +235,7 @@ static int __init declare_of_platform_devices(void)
machine_device_initcall(gef_ppc9a, declare_of_platform_devices); machine_device_initcall(gef_ppc9a, declare_of_platform_devices);
define_machine(gef_ppc9a) { define_machine(gef_ppc9a) {
.name = "GE Fanuc PPC9A", .name = "GE PPC9A",
.probe = gef_ppc9a_probe, .probe = gef_ppc9a_probe,
.setup_arch = gef_ppc9a_setup_arch, .setup_arch = gef_ppc9a_setup_arch,
.init_IRQ = gef_ppc9a_init_irq, .init_IRQ = gef_ppc9a_init_irq,
......
/* /*
* GE Fanuc SBC310 board support * GE SBC310 board support
* *
* Author: Martyn Welch <martyn.welch@gefanuc.com> * Author: Martyn Welch <martyn.welch@ge.com>
* *
* Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the * under the terms of the GNU General Public License as published by the
...@@ -82,7 +82,7 @@ static void __init gef_sbc310_setup_arch(void) ...@@ -82,7 +82,7 @@ static void __init gef_sbc310_setup_arch(void)
} }
#endif #endif
printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC310 6U VPX SBC\n"); printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
mpc86xx_smp_init(); mpc86xx_smp_init();
...@@ -142,7 +142,7 @@ static void gef_sbc310_show_cpuinfo(struct seq_file *m) ...@@ -142,7 +142,7 @@ static void gef_sbc310_show_cpuinfo(struct seq_file *m)
{ {
uint svid = mfspr(SPRN_SVR); uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id()); seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id());
seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(), seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(),
...@@ -223,7 +223,7 @@ static int __init declare_of_platform_devices(void) ...@@ -223,7 +223,7 @@ static int __init declare_of_platform_devices(void)
machine_device_initcall(gef_sbc310, declare_of_platform_devices); machine_device_initcall(gef_sbc310, declare_of_platform_devices);
define_machine(gef_sbc310) { define_machine(gef_sbc310) {
.name = "GE Fanuc SBC310", .name = "GE SBC310",
.probe = gef_sbc310_probe, .probe = gef_sbc310_probe,
.setup_arch = gef_sbc310_setup_arch, .setup_arch = gef_sbc310_setup_arch,
.init_IRQ = gef_sbc310_init_irq, .init_IRQ = gef_sbc310_init_irq,
......
/* /*
* GE Fanuc SBC610 board support * GE SBC610 board support
* *
* Author: Martyn Welch <martyn.welch@gefanuc.com> * Author: Martyn Welch <martyn.welch@ge.com>
* *
* Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the * under the terms of the GNU General Public License as published by the
...@@ -82,7 +82,7 @@ static void __init gef_sbc610_setup_arch(void) ...@@ -82,7 +82,7 @@ static void __init gef_sbc610_setup_arch(void)
} }
#endif #endif
printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC610 6U VPX SBC\n"); printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
mpc86xx_smp_init(); mpc86xx_smp_init();
...@@ -133,7 +133,7 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m) ...@@ -133,7 +133,7 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m)
{ {
uint svid = mfspr(SPRN_SVR); uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(), seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(),
('A' + gef_sbc610_get_board_rev() - 1)); ('A' + gef_sbc610_get_board_rev() - 1));
...@@ -212,7 +212,7 @@ static int __init declare_of_platform_devices(void) ...@@ -212,7 +212,7 @@ static int __init declare_of_platform_devices(void)
machine_device_initcall(gef_sbc610, declare_of_platform_devices); machine_device_initcall(gef_sbc610, declare_of_platform_devices);
define_machine(gef_sbc610) { define_machine(gef_sbc610) {
.name = "GE Fanuc SBC610", .name = "GE SBC610",
.probe = gef_sbc610_probe, .probe = gef_sbc610_probe,
.setup_arch = gef_sbc610_setup_arch, .setup_arch = gef_sbc610_setup_arch,
.init_IRQ = gef_sbc610_init_irq, .init_IRQ = gef_sbc610_init_irq,
......
...@@ -144,6 +144,16 @@ config FSL_EMB_PERFMON ...@@ -144,6 +144,16 @@ config FSL_EMB_PERFMON
and some e300 cores (c3 and c4). Select this only if your and some e300 cores (c3 and c4). Select this only if your
core supports the Embedded Performance Monitor APU core supports the Embedded Performance Monitor APU
config FSL_EMB_PERF_EVENT
bool
depends on FSL_EMB_PERFMON && PERF_EVENTS && !PPC_PERF_CTRS
default y
config FSL_EMB_PERF_EVENT_E500
bool
depends on FSL_EMB_PERF_EVENT && E500
default y
config 4xx config 4xx
bool bool
depends on 40x || 44x depends on 40x || 44x
......
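
Note: both new symbols are invisible bools — they carry no prompt, so they cannot be toggled by hand and simply default to y whenever their dependencies are met. Code then keys off the generated CONFIG_ macros; a hypothetical guard, assuming the usual CONFIG_ prefix (e500_pmu_present is an illustrative helper, not from the patch):

#ifdef CONFIG_FSL_EMB_PERF_EVENT_E500
static inline int e500_pmu_present(void) { return 1; }
#else
static inline int e500_pmu_present(void) { return 0; }
#endif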
...@@ -43,17 +43,14 @@ system_reset_iSeries: ...@@ -43,17 +43,14 @@ system_reset_iSeries:
LOAD_REG_ADDR(r23, alpaca) LOAD_REG_ADDR(r23, alpaca)
li r0,ALPACA_SIZE li r0,ALPACA_SIZE
sub r23,r13,r23 sub r23,r13,r23
divdu r23,r23,r0 /* r23 has cpu number */ divdu r24,r23,r0 /* r24 has cpu number */
LOAD_REG_ADDR(r13, paca)
mulli r0,r23,PACA_SIZE
add r13,r13,r0
mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
mfmsr r24
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
mr r24,r23
cmpwi 0,r24,0 /* Are we processor 0? */ cmpwi 0,r24,0 /* Are we processor 0? */
bne 1f bne 1f
LOAD_REG_ADDR(r13, boot_paca)
mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
mfmsr r23
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
b .__start_initialization_iSeries /* Start up the first processor */ b .__start_initialization_iSeries /* Start up the first processor */
1: mfspr r4,SPRN_CTRLF 1: mfspr r4,SPRN_CTRLF
li r5,CTRL_RUNLATCH /* Turn off the run light */ li r5,CTRL_RUNLATCH /* Turn off the run light */
...@@ -86,6 +83,16 @@ system_reset_iSeries: ...@@ -86,6 +83,16 @@ system_reset_iSeries:
#endif #endif
2: 2:
/* Load our paca now that it's been allocated */
LOAD_REG_ADDR(r13, paca)
ld r13,0(r13)
mulli r0,r24,PACA_SIZE
add r13,r13,r0
mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
mfmsr r23
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
HMT_LOW HMT_LOW
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lbz r23,PACAPROCSTART(r13) /* Test if this processor lbz r23,PACAPROCSTART(r13) /* Test if this processor
......
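
Note: the reshuffled reset path reflects the move to dynamically allocated pacas. Processor 0 can no longer index a static array before any allocator has run, so it boots on boot_paca, while secondaries fall through to label 2: and fetch their paca only after it has been allocated. The double load (LOAD_REG_ADDR followed by ld) shows that paca is now a pointer to the array rather than the array itself. A C rendering of what the asm at label 2: computes, under that assumption (secondary_paca is an illustrative name, not from the patch):

#include <asm/paca.h>	/* struct paca_struct, paca */

static inline struct paca_struct *secondary_paca(int cpu)
{
	/* one extra dereference, then index by cpu * PACA_SIZE
	 * (the mulli/add pair in the asm above) */
	return &paca[cpu];
}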
...@@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void) ...@@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void)
if (!get_lppaca()->shared_proc) if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 1; get_lppaca()->donate_dedicated_cpu = 1;
printk(KERN_INFO
"cpu %u (hwid %u) ceding for offline with hint %d\n",
cpu, hwcpu, cede_latency_hint);
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
extended_cede_processor(cede_latency_hint); extended_cede_processor(cede_latency_hint);
printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
cpu, hwcpu);
printk(KERN_INFO
"Decrementer value = %x Timebase value = %llx\n",
get_dec(), get_tb());
} }
printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
cpu, hwcpu);
if (!get_lppaca()->shared_proc) if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0; get_lppaca()->idle = 0;
}
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
/* /*
* NOTE: Calling start_secondary() here for now to * Call to start_secondary_resume() will not return.
* start new context. * Kernel stack will be reset and start_secondary()
* However, need to do it cleanly by resetting the * will be called to continue the online operation.
* stack pointer. */
*/ start_secondary_resume();
start_secondary(); }
}
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { /* Requested state is CPU_STATE_OFFLINE at this point */
WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
set_cpu_current_state(cpu, CPU_STATE_OFFLINE); set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
unregister_slb_shadow(hard_smp_processor_id(), unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
__pa(get_slb_shadow())); rtas_stop_self();
rtas_stop_self();
}
/* Should never get here... */ /* Should never get here... */
BUG(); BUG();
......
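
Note: two fixes meet in this hunk — the chatty per-cede printks are dropped, and the return-to-online path stops calling start_secondary() directly. The old call re-entered secondary bring-up on top of whatever stack depth the offline path had reached; start_secondary_resume() first resets the kernel stack, so repeated offline/online cycles start clean. A condensed restatement of the resulting control flow (offline_path is an illustrative wrapper, not in the patch; every helper inside it appears in the hunk above):

static void offline_path(unsigned int cpu, unsigned int hwcpu, u8 hint)
{
	while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE)
		extended_cede_processor(hint);		/* sleep until prodded */

	if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
		unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
		start_secondary_resume();		/* fresh stack; never returns */
	}

	/* only CPU_STATE_OFFLINE remains, hence the WARN_ON */
	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
	rtas_stop_self();				/* never returns */
	BUG();
}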
...@@ -9,10 +9,31 @@ enum cpu_state_vals { ...@@ -9,10 +9,31 @@ enum cpu_state_vals {
CPU_MAX_OFFLINE_STATES CPU_MAX_OFFLINE_STATES
}; };
#ifdef CONFIG_HOTPLUG_CPU
extern enum cpu_state_vals get_cpu_current_state(int cpu); extern enum cpu_state_vals get_cpu_current_state(int cpu);
extern void set_cpu_current_state(int cpu, enum cpu_state_vals state); extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
extern enum cpu_state_vals get_preferred_offline_state(int cpu);
extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state); extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
extern void set_default_offline_state(int cpu); extern void set_default_offline_state(int cpu);
#else
static inline enum cpu_state_vals get_cpu_current_state(int cpu)
{
return CPU_STATE_ONLINE;
}
static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
}
static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
}
static inline void set_default_offline_state(int cpu)
{
}
#endif
extern enum cpu_state_vals get_preferred_offline_state(int cpu);
extern int start_secondary(void); extern int start_secondary(void);
extern void start_secondary_resume(void);
#endif #endif
...@@ -259,12 +259,12 @@ static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) ...@@ -259,12 +259,12 @@ static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
return plpar_hcall_norets(H_IPI, servernum, mfrr); return plpar_hcall_norets(H_IPI, servernum, mfrr);
} }
static inline long plpar_xirr(unsigned long *xirr_ret) static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr)
{ {
long rc; long rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
rc = plpar_hcall(H_XIRR, retbuf); rc = plpar_hcall(H_XIRR, retbuf, cppr);
*xirr_ret = retbuf[0]; *xirr_ret = retbuf[0];
......
...@@ -120,12 +120,12 @@ static inline void direct_qirr_info(int n_cpu, u8 value) ...@@ -120,12 +120,12 @@ static inline void direct_qirr_info(int n_cpu, u8 value)
/* LPAR low level accessors */ /* LPAR low level accessors */
static inline unsigned int lpar_xirr_info_get(void) static inline unsigned int lpar_xirr_info_get(unsigned char cppr)
{ {
unsigned long lpar_rc; unsigned long lpar_rc;
unsigned long return_value; unsigned long return_value;
lpar_rc = plpar_xirr(&return_value); lpar_rc = plpar_xirr(&return_value, cppr);
if (lpar_rc != H_SUCCESS) if (lpar_rc != H_SUCCESS)
panic(" bad return code xirr - rc = %lx\n", lpar_rc); panic(" bad return code xirr - rc = %lx\n", lpar_rc);
return (unsigned int)return_value; return (unsigned int)return_value;
...@@ -331,7 +331,8 @@ static unsigned int xics_get_irq_direct(void) ...@@ -331,7 +331,8 @@ static unsigned int xics_get_irq_direct(void)
static unsigned int xics_get_irq_lpar(void) static unsigned int xics_get_irq_lpar(void)
{ {
unsigned int xirr = lpar_xirr_info_get(); struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
unsigned int vec = xics_xirr_vector(xirr); unsigned int vec = xics_xirr_vector(xirr);
unsigned int irq; unsigned int irq;
......
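
Note: with the OS now tracking its CPPR in the per-CPU xics_cppr stack, the LPAR accessor forwards the current value to the H_XIRR hcall, so the hypervisor services the fetch at the same processor priority the OS believes it is running at. For reference, the standard XICS packing of the 32-bit XIRR, stated here as an assumption (the helpers are illustrative, not from the patch):

static inline unsigned char xirr_to_cppr(unsigned int xirr)
{
	return xirr >> 24;		/* CPPR in bits 31..24 */
}

static inline unsigned int xirr_to_source(unsigned int xirr)
{
	return xirr & 0x00ffffff;	/* XISR (interrupt source) in bits 23..0 */
}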
...@@ -3,6 +3,6 @@ ...@@ -3,6 +3,6 @@
extern unsigned int cpm2_get_irq(void); extern unsigned int cpm2_get_irq(void);
extern void cpm2_pic_init(struct device_node*); extern void cpm2_pic_init(struct device_node *);
#endif /* _PPC_KERNEL_CPM2_H */ #endif /* _PPC_KERNEL_CPM2_H */
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#include "qe_ic.h" #include "qe_ic.h"
static DEFINE_SPINLOCK(qe_ic_lock); static DEFINE_RAW_SPINLOCK(qe_ic_lock);
static struct qe_ic_info qe_ic_info[] = { static struct qe_ic_info qe_ic_info[] = {
[1] = { [1] = {
...@@ -201,13 +201,13 @@ static void qe_ic_unmask_irq(unsigned int virq) ...@@ -201,13 +201,13 @@ static void qe_ic_unmask_irq(unsigned int virq)
unsigned long flags; unsigned long flags;
u32 temp; u32 temp;
spin_lock_irqsave(&qe_ic_lock, flags); raw_spin_lock_irqsave(&qe_ic_lock, flags);
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
temp | qe_ic_info[src].mask); temp | qe_ic_info[src].mask);
spin_unlock_irqrestore(&qe_ic_lock, flags); raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
} }
static void qe_ic_mask_irq(unsigned int virq) static void qe_ic_mask_irq(unsigned int virq)
...@@ -217,7 +217,7 @@ static void qe_ic_mask_irq(unsigned int virq) ...@@ -217,7 +217,7 @@ static void qe_ic_mask_irq(unsigned int virq)
unsigned long flags; unsigned long flags;
u32 temp; u32 temp;
spin_lock_irqsave(&qe_ic_lock, flags); raw_spin_lock_irqsave(&qe_ic_lock, flags);
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
...@@ -233,7 +233,7 @@ static void qe_ic_mask_irq(unsigned int virq) ...@@ -233,7 +233,7 @@ static void qe_ic_mask_irq(unsigned int virq)
*/ */
mb(); mb();
spin_unlock_irqrestore(&qe_ic_lock, flags); raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
} }
static struct irq_chip qe_ic_irq_chip = { static struct irq_chip qe_ic_irq_chip = {
......
...@@ -948,10 +948,16 @@ static void do_monitor_cpu_combined(void) ...@@ -948,10 +948,16 @@ static void do_monitor_cpu_combined(void)
printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n", printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
temp_combi >> 16); temp_combi >> 16);
state0->overtemp += CPU_MAX_OVERTEMP / 4; state0->overtemp += CPU_MAX_OVERTEMP / 4;
} else if (temp_combi > (state0->mpu.tmax << 16)) } else if (temp_combi > (state0->mpu.tmax << 16)) {
state0->overtemp++; state0->overtemp++;
else printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
} else {
if (state0->overtemp)
printk(KERN_WARNING "Temperature back down to %d\n",
temp_combi >> 16);
state0->overtemp = 0; state0->overtemp = 0;
}
if (state0->overtemp >= CPU_MAX_OVERTEMP) if (state0->overtemp >= CPU_MAX_OVERTEMP)
critical_state = 1; critical_state = 1;
if (state0->overtemp > 0) { if (state0->overtemp > 0) {
...@@ -1023,10 +1029,16 @@ static void do_monitor_cpu_split(struct cpu_pid_state *state) ...@@ -1023,10 +1029,16 @@ static void do_monitor_cpu_split(struct cpu_pid_state *state)
" (%d) !\n", " (%d) !\n",
state->index, temp >> 16); state->index, temp >> 16);
state->overtemp += CPU_MAX_OVERTEMP / 4; state->overtemp += CPU_MAX_OVERTEMP / 4;
} else if (temp > (state->mpu.tmax << 16)) } else if (temp > (state->mpu.tmax << 16)) {
state->overtemp++; state->overtemp++;
else printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
state->index, temp >> 16, state->mpu.tmax, state->overtemp);
} else {
if (state->overtemp)
printk(KERN_WARNING "CPU %d temperature back down to %d\n",
state->index, temp >> 16);
state->overtemp = 0; state->overtemp = 0;
}
if (state->overtemp >= CPU_MAX_OVERTEMP) if (state->overtemp >= CPU_MAX_OVERTEMP)
critical_state = 1; critical_state = 1;
if (state->overtemp > 0) { if (state->overtemp > 0) {
...@@ -1085,10 +1097,16 @@ static void do_monitor_cpu_rack(struct cpu_pid_state *state) ...@@ -1085,10 +1097,16 @@ static void do_monitor_cpu_rack(struct cpu_pid_state *state)
" (%d) !\n", " (%d) !\n",
state->index, temp >> 16); state->index, temp >> 16);
state->overtemp = CPU_MAX_OVERTEMP / 4; state->overtemp = CPU_MAX_OVERTEMP / 4;
} else if (temp > (state->mpu.tmax << 16)) } else if (temp > (state->mpu.tmax << 16)) {
state->overtemp++; state->overtemp++;
else printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
state->index, temp >> 16, state->mpu.tmax, state->overtemp);
} else {
if (state->overtemp)
printk(KERN_WARNING "CPU %d temperature back down to %d\n",
state->index, temp >> 16);
state->overtemp = 0; state->overtemp = 0;
}
if (state->overtemp >= CPU_MAX_OVERTEMP) if (state->overtemp >= CPU_MAX_OVERTEMP)
critical_state = 1; critical_state = 1;
if (state->overtemp > 0) { if (state->overtemp > 0) {
......
...@@ -269,7 +269,7 @@ struct slots_pid_state ...@@ -269,7 +269,7 @@ struct slots_pid_state
#define CPU_TEMP_HISTORY_SIZE 2 #define CPU_TEMP_HISTORY_SIZE 2
#define CPU_POWER_HISTORY_SIZE 10 #define CPU_POWER_HISTORY_SIZE 10
#define CPU_PID_INTERVAL 1 #define CPU_PID_INTERVAL 1
#define CPU_MAX_OVERTEMP 30 #define CPU_MAX_OVERTEMP 90
#define CPUA_PUMP_RPM_INDEX 7 #define CPUA_PUMP_RPM_INDEX 7
#define CPUB_PUMP_RPM_INDEX 8 #define CPUB_PUMP_RPM_INDEX 8
......
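
Note: assuming the PID loop really runs once per CPU_PID_INTERVAL (1, i.e. one sample per second), raising CPU_MAX_OVERTEMP from 30 to 90 stretches the tolerated window of sustained above-tmax readings before critical_state forces a shutdown:

	shutdown window = CPU_MAX_OVERTEMP * CPU_PID_INTERVAL = 90 * 1 s = 90 s   (was 30 s)

A single "way above maximum" reading still loads the counter with CPU_MAX_OVERTEMP / 4 = 22 at once, so hard excursions keep tripping quickly, and the new printks above make both the climb and the recovery visible in the log.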