Commit 2c856e14 authored by Linus Torvalds

Merge tag 'arm64-perf' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm[64] perf updates from Will Deacon:
 "I have another mixed bag of ARM-related perf patches here.

  It's about 25% CPU and 75% interconnect, but with drivers/bus/
  languishing without an obvious maintainer or tree, Olof and I agreed
  to keep all of these PMU patches together.  I suspect a whole load of
  code from drivers/bus/arm-* can be moved under drivers/perf/, so
  that's on the radar for the future.

  Summary:

   - Initial support for ARMv8.1 CPU PMUs

   - Support for the CPU PMU in Cavium ThunderX

   - CPU PMU support for systems running 32-bit Linux in secure mode

   - Support for the system PMU in ARM CCI-550 (Cache Coherent Interconnect)"

* tag 'arm64-perf' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (26 commits)
  drivers/perf: arm_pmu: avoid NULL dereference when not using devicetree
  arm64: perf: Extend ARMV8_EVTYPE_MASK to include PMCR.LC
  arm-cci: remove unused variable
  arm-cci: don't return value from void function
  arm-cci: make private functions static
  arm-cci: CoreLink CCI-550 PMU driver
  arm-cci500: Rearrange PMU driver for code sharing with CCI-550 PMU
  arm-cci: CCI-500: Work around PMU counter writes
  arm-cci: Provide hook for writing to PMU counters
  arm-cci: Add helper to enable PMU without synchornising counters
  arm-cci: Add routines to save/restore all counters
  arm-cci: Get the status of a counter
  arm-cci: write_counter: Remove redundant check
  arm-cci: Delay PMU counter writes to pmu::pmu_enable
  arm-cci: Refactor CCI PMU enable/disable methods
  arm-cci: Group writes to counter
  arm-cci: fix handling cpumask_any_but return value
  arm-cci: simplify sysfs attr handling
  drivers/perf: arm_pmu: implement CPU_PM notifier
  arm64: dts: Add Cavium ThunderX specific PMU
  ...
@@ -34,6 +34,7 @@ specific to ARM.
 	Definition: must contain one of the following:
 		 "arm,cci-400"
 		 "arm,cci-500"
+		 "arm,cci-550"
 - reg
 	Usage: required
@@ -101,6 +102,7 @@ specific to ARM.
 		 "arm,cci-400-pmu"  - DEPRECATED, permitted only where OS has
 				      secure acces to CCI registers
 		 "arm,cci-500-pmu,r0"
+		 "arm,cci-550-pmu,r0"
 - reg:
 	Usage: required
 	Value type: Integer cells. A register entry, expressed
......
@@ -25,6 +25,7 @@ Required properties:
 		   "qcom,scorpion-pmu"
 		   "qcom,scorpion-mp-pmu"
 		   "qcom,krait-pmu"
+		   "cavium,thunder-pmu"
 - interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
                interrupt (PPI) then 1 interrupt should be specified.
@@ -46,6 +47,16 @@ Optional properties:
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
+- secure-reg-access : Indicates that the ARMv7 Secure Debug Enable Register
+		      (SDER) is accessible. This will cause the driver to do
+		      any setup required that is only possible in ARMv7 secure
+		      state. If not present the ARMv7 SDER will not be touched,
+		      which means the PMU may fail to operate unless external
+		      code (bootloader or security monitor) has performed the
+		      appropriate initialisation. Note that this property is
+		      not valid for non-ARMv7 CPUs or ARMv7 CPUs booting Linux
+		      in Non-secure state.
 
 Example:
 
 pmu {
......
@@ -712,6 +712,11 @@ static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
 #define ARMV7_EXCLUDE_USER	(1 << 30)
 #define ARMV7_INCLUDE_HYP	(1 << 27)
 
+/*
+ * Secure debug enable reg
+ */
+#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
+
 static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
@@ -1094,7 +1099,13 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 static void armv7pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u32 idx, nb_cnt = cpu_pmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events, val;
+
+	if (cpu_pmu->secure_access) {
+		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
+		val |= ARMV7_SDER_SUNIDEN;
+		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
+	}
 
 	/* The counter and interrupt enable registers are unknown at reset. */
 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
......
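The armv7pmu_reset() hunk above boils down to a read-modify-write of the Secure Debug Enable Register: read SDER with mrc, OR in SUNIDEN (bit 1, which permits non-invasive debug, including the PMU, while the core runs Secure), and write it back with mcr. A minimal userspace sketch of that pattern, with a plain variable standing in for the cp15 register (illustrative only, not the kernel code):

```c
#include <stdint.h>
#include <stdio.h>

#define SDER_SUNIDEN (1u << 1)	/* permit non-invasive debug in Secure state */

static uint32_t sder;		/* stand-in for the real cp15 register */

static void enable_secure_counting(void)
{
	uint32_t val = sder;	/* mrc p15, 0, %0, c1, c1, 1 */
	val |= SDER_SUNIDEN;	/* set SUNIDEN, leave other bits alone */
	sder = val;		/* mcr p15, 0, %0, c1, c1, 1 */
}

int main(void)
{
	enable_secure_counting();
	printf("SDER = %#x\n", sder);	/* prints SDER = 0x2 */
	return 0;
}
```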
@@ -360,6 +360,11 @@
 			     <1 10 0xff01>;
 	};
 
+	pmu {
+		compatible = "cavium,thunder-pmu", "arm,armv8-pmuv3";
+		interrupts = <1 7 4>;
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
......
@@ -88,16 +88,25 @@
 #define ARMV8_PMUV3_PERFCTR_L2D_TLB				0x2F
 #define ARMV8_PMUV3_PERFCTR_L21_TLB				0x30
 
+/* ARMv8 implementation defined event types. */
+#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD		0x40
+#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST		0x41
+#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD		0x42
+#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST		0x43
+#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD			0x4C
+#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST			0x4D
+#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD			0x4E
+#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST			0x4F
+
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL			0xC2
 
-/* ARMv8 Cortex-A57 and Cortex-A72 specific event types. */
-#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD			0x40
-#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST			0x41
-#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD			0x42
-#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST			0x43
-#define ARMV8_A57_PERFCTR_DTLB_REFILL_LD			0x4c
-#define ARMV8_A57_PERFCTR_DTLB_REFILL_ST			0x4d
+/* ARMv8 Cavium ThunderX specific event types. */
+#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST			0xE9
+#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS		0xEA
+#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS		0xEB
+#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS		0xEC
+#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS		0xED
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
@@ -132,6 +141,18 @@ static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
 };
 
+static const unsigned armv8_thunder_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
+};
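Tables like armv8_thunder_perf_map above are consumed by armpmu_map_event(), which indexes the table with the generic perf config value and treats a sentinel entry as "unsupported". A hedged sketch of that lookup (standalone GNU C; the sentinel value, table size, and event numbers are illustrative stand-ins, not the kernel's internals):

```c
#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_HW_MAX 10		/* illustrative size */
#define HW_OP_UNSUPPORTED 0xffff	/* illustrative sentinel */

/* Range-designated init mirrors PERF_MAP_ALL_UNSUPPORTED (GNU C extension). */
static const unsigned perf_map[PERF_COUNT_HW_MAX] = {
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED,
	[0] = 0x11,	/* cpu-cycles   -> PMU event number */
	[1] = 0x08,	/* instructions -> PMU event number */
};

static int map_event(uint64_t config)
{
	if (config >= PERF_COUNT_HW_MAX ||
	    perf_map[config] == HW_OP_UNSUPPORTED)
		return -1;	/* event not supported on this PMU */
	return (int)perf_map[config];
}

int main(void)
{
	printf("cpu-cycles  -> %#x\n", map_event(0));	/* 0x11 */
	printf("unsupported -> %d\n", map_event(7));	/* -1 */
	return 0;
}
```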
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						 [PERF_COUNT_HW_CACHE_OP_MAX]
 						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -175,16 +196,46 @@ static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
-	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
-	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST,
 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
-	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
-	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,
 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
+static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+						  [PERF_COUNT_HW_CACHE_OP_MAX]
+						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS,
+	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS,
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
@@ -325,7 +376,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 	NULL,
 };
 
-
 /*
  * Perf Events' indices
  */
@@ -356,9 +406,10 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 #define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 #define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
 #define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMCR_LC		(1 << 6) /* Overflow on 64 bit cycle counter */
 #define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
 #define ARMV8_PMCR_N_MASK	0x1f
-#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
+#define ARMV8_PMCR_MASK		0x7f	 /* Mask for writable bits */
 
 /*
  * PMOVSR: counters overflow flag status reg
@@ -369,8 +420,8 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
+#define ARMV8_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
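The EVTYPE change above is the heart of the ARMv8.1 support: event numbers grow from 10 to 16 bits, so both the writable-bits mask and the event-field mask widen. A quick standalone check of field extraction with the new masks (the example config value is made up for illustration):

```c
#include <assert.h>
#include <stdint.h>

#define ARMV8_EVTYPE_MASK  0xc800ffffu	/* writable bits: filters + event */
#define ARMV8_EVTYPE_EVENT 0x0000ffffu	/* 16-bit event number (was 0x3ff) */

int main(void)
{
	/* made-up config: an exclude filter bit plus ThunderX event 0xE9 */
	uint32_t config = (1u << 31) | 0xE9;
	uint32_t evtype = config & ARMV8_EVTYPE_MASK;

	assert((evtype & ARMV8_EVTYPE_EVENT) == 0xE9);	/* event survives */
	assert(evtype >> 31);				/* so does the filter */
	return 0;
}
```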
 
 /*
  * Event filters for PMUv3
@@ -445,9 +496,16 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 	if (!armv8pmu_counter_valid(cpu_pmu, idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
-	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
-	else if (armv8pmu_select_counter(idx) == idx)
+	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
+		/*
+		 * Set the upper 32bits as this is a 64bit counter but we only
+		 * count using the lower 32bits and we want an interrupt when
+		 * it overflows.
+		 */
+		u64 value64 = 0xffffffff00000000ULL | value;
+
+		asm volatile("msr pmccntr_el0, %0" :: "r" (value64));
+	} else if (armv8pmu_select_counter(idx) == idx)
 		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
 }
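To see why the pmccntr_el0 write above ORs in 0xffffffff00000000: with PMCR.LC set, the overflow interrupt fires when the full 64-bit counter wraps, so saturating the upper 32 bits makes the 64-bit wrap coincide exactly with the 32-bit wrap that the driver's period arithmetic expects. A small standalone check of the arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value   = 0xfffffff0u;	/* 32-bit period written by the driver */
	uint64_t value64 = 0xffffffff00000000ULL | value;

	/* cycles until the 64-bit counter overflows... */
	uint64_t wrap64 = -value64;	/* 0 - value64 (mod 2^64) */
	/* ...equals cycles until the 32-bit value would have overflowed */
	uint32_t wrap32 = -value;	/* 0 - value (mod 2^32) */

	printf("64-bit wrap in %llu cycles, 32-bit wrap in %u cycles\n",
	       (unsigned long long)wrap64, wrap32);	/* both print 16 */
	return 0;
}
```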
@@ -722,8 +780,11 @@ static void armv8pmu_reset(void *info)
 		armv8pmu_disable_intens(idx);
 	}
 
-	/* Initialize & Reset PMNC: C and P bits. */
-	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+	/*
+	 * Initialize & Reset PMNC. Request overflow interrupt for
+	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
+	 */
+	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
@@ -747,6 +808,13 @@ static int armv8_a57_map_event(struct perf_event *event)
 				ARMV8_EVTYPE_EVENT);
 }
 
+static int armv8_thunder_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &armv8_thunder_perf_map,
+				&armv8_thunder_perf_cache_map,
+				ARMV8_EVTYPE_EVENT);
+}
+
 static void armv8pmu_read_num_pmnc_events(void *info)
 {
 	int *nb_cnt = info;
@@ -815,11 +883,21 @@ static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
 	return armv8pmu_probe_num_events(cpu_pmu);
 }
 
+static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv8_pmu_init(cpu_pmu);
+	cpu_pmu->name = "armv8_cavium_thunder";
+	cpu_pmu->map_event = armv8_thunder_map_event;
+	cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups;
+	return armv8pmu_probe_num_events(cpu_pmu);
+}
+
 static const struct of_device_id armv8_pmu_of_device_ids[] = {
 	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
 	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
 	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
 	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
+	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
 	{},
 };
......
@@ -34,15 +34,15 @@ config ARM_CCI400_PORT_CTRL
 	  Low level power management driver for CCI400 cache coherent
 	  interconnect for ARM platforms.
 
-config ARM_CCI500_PMU
-	bool "ARM CCI500 PMU support"
+config ARM_CCI5xx_PMU
+	bool "ARM CCI-500/CCI-550 PMU support"
 	depends on (ARM && CPU_V7) || ARM64
 	depends on PERF_EVENTS
 	select ARM_CCI_PMU
 	help
-	  Support for PMU events monitoring on the ARM CCI-500 cache coherent
-	  interconnect. CCI-500 provides 8 independent event counters, which
-	  can count events pertaining to the slave/master interfaces as well
-	  as the internal events to the CCI.
+	  Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
+	  coherent interconnects. Both of them provide 8 independent event counters,
+	  which can count events pertaining to the slave/master interfaces as well
+	  as the internal events to the CCI.
 
 	  If unsure, say Y
......
This diff is collapsed.
@@ -13,6 +13,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
@@ -710,6 +711,93 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct perf_event *event;
+	int idx;
+
+	for (idx = 0; idx < armpmu->num_events; idx++) {
+		/*
+		 * If the counter is not used skip it, there is no
+		 * need of stopping/restarting it.
+		 */
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			/*
+			 * Stop and update the counter
+			 */
+			armpmu_stop(event, PERF_EF_UPDATE);
+			break;
+		case CPU_PM_EXIT:
+		case CPU_PM_ENTER_FAILED:
+			/* Restore and enable the counter */
+			armpmu_start(event, PERF_EF_RELOAD);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	/*
+	 * Always reset the PMU registers on power-up even if
+	 * there are no events running.
+	 */
+	if (cmd == CPU_PM_EXIT && armpmu->reset)
+		armpmu->reset(armpmu);
+
+	if (!enabled)
+		return NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		armpmu->stop(armpmu);
+		cpu_pm_pmu_setup(armpmu, cmd);
+		break;
+	case CPU_PM_EXIT:
+		cpu_pm_pmu_setup(armpmu, cmd);
+	case CPU_PM_ENTER_FAILED:
+		armpmu->start(armpmu);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+#endif
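The notifier above hooks the cpuidle/suspend path: active counters are stopped and saved before the core powers down, and on power-up the PMU is unconditionally reset (its register state is lost) before events are reloaded and restarted; note the deliberate fall-through from CPU_PM_EXIT to CPU_PM_ENTER_FAILED, since both end by restarting the PMU. A compact userspace model of that control flow, with print statements standing in for the real armpmu callbacks (names mirror the patch, bodies are placeholders):

```c
#include <stdio.h>

enum pm_cmd { CPU_PM_ENTER, CPU_PM_EXIT, CPU_PM_ENTER_FAILED };

static void pmu_stop(void)  { puts("stop PMU, save active counters"); }
static void pmu_reset(void) { puts("reset PMU registers"); }
static void pmu_start(void) { puts("reload counters, start PMU"); }

static void cpu_pm_notify(enum pm_cmd cmd, int events_active)
{
	/* always reset on power-up, even with no events running */
	if (cmd == CPU_PM_EXIT)
		pmu_reset();

	if (!events_active)
		return;

	switch (cmd) {
	case CPU_PM_ENTER:
		pmu_stop();
		break;
	case CPU_PM_EXIT:	/* falls through to restart, as in the patch */
	case CPU_PM_ENTER_FAILED:
		pmu_start();
		break;
	}
}

int main(void)
{
	cpu_pm_notify(CPU_PM_ENTER, 1);	/* core about to power down */
	cpu_pm_notify(CPU_PM_EXIT, 1);	/* core back up: reset, then restart */
	return 0;
}
```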
 
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int err;
@@ -725,6 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (err)
 		goto out_hw_events;
 
+	err = cpu_pm_pmu_register(cpu_pmu);
+	if (err)
+		goto out_unregister;
+
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -746,6 +838,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 
 	return 0;
 
+out_unregister:
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 out_hw_events:
 	free_percpu(cpu_hw_events);
 	return err;
@@ -753,6 +847,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
+	cpu_pm_pmu_unregister(cpu_pmu);
 	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 	free_percpu(cpu_pmu->hw_events);
 }
@@ -889,6 +984,15 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 
+		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+							   "secure-reg-access");
+
+		/* arm64 systems boot only as non-secure */
+		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+			pmu->secure_access = false;
+		}
+
 		ret = of_pmu_irq_cfg(pmu);
 		if (!ret)
 			ret = init_fn(pmu);
@@ -898,7 +1002,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	}
 
 	if (ret) {
-		pr_info("failed to probe PMU!\n");
+		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
 		goto out_free;
 	}
 
@@ -918,7 +1022,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 out_destroy:
 	cpu_pmu_destroy(pmu);
 out_free:
-	pr_info("failed to register PMU devices!\n");
+	pr_info("%s: failed to register PMU devices!\n",
+		of_node_full_name(node));
 	kfree(pmu);
 	return ret;
 }
@@ -104,9 +104,11 @@ struct arm_pmu {
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
+	bool		secure_access; /* 32-bit ARM only */
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
 	struct notifier_block	hotplug_nb;
+	struct notifier_block	cpu_pm_nb;
 };
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
......