diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4f7001f28936f74f9cc75ba6124ee54f76e0ebe4..d275da3d81dde2e23c1d4ce35859315bc27b903f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -884,7 +884,6 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	}
 
 	if (!assign || unsched) {
-
 		for (i = 0; i < n; i++) {
 			e = cpuc->event_list[i];
 			/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index e5609522255c5702b6e6730af3193539c110467c..89e6cd61e6ae39d0ff75ebe40eb5cf7331a40f2d 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -133,7 +133,6 @@ enum intel_excl_state_type {
 };
 
 struct intel_excl_states {
-	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
 	bool sched_started; /* true if scheduling has started */
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a3e794cdc06455fba228e7834e72132b5097e7c..f3201439031d580376e534d703f0ac9b95e81d22 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1927,11 +1927,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
 	 * makes scheduling appear as a transaction
 	 */
 	raw_spin_lock(&excl_cntrs->lock);
-
-	/*
-	 * Save a copy of our state to work on.
-	 */
-	memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
 }
 
 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
@@ -1955,9 +1950,9 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
 	lockdep_assert_held(&excl_cntrs->lock);
 
 	if (c->flags & PERF_X86_EVENT_EXCL)
-		xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
 	else
-		xl->init_state[cntr] = INTEL_EXCL_SHARED;
+		xl->state[cntr] = INTEL_EXCL_SHARED;
 }
 
 static void
@@ -1980,11 +1975,6 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
 	xl = &excl_cntrs->states[tid];
 
-	/*
-	 * Commit the working state.
-	 */
-	memcpy(xl->state, xl->init_state, sizeof(xl->state));
-
 	xl->sched_started = false;
 	/*
 	 * release shared state lock (acquired in intel_start_scheduling())
 	 */
@@ -2519,19 +2509,11 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
 {
 	struct intel_excl_cntrs *c;
-	int i;
 
 	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
			 GFP_KERNEL, cpu_to_node(cpu));
 	if (c) {
 		raw_spin_lock_init(&c->lock);
-		for (i = 0; i < X86_PMC_IDX_MAX; i++) {
-			c->states[0].state[i] = INTEL_EXCL_UNUSED;
-			c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
-
-			c->states[1].state[i] = INTEL_EXCL_UNUSED;
-			c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
-		}
 		c->core_id = -1;
 	}
 	return c;
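
A side note on the allocate_excl_cntrs() hunk: dropping the per-counter initialization loop is safe only because kzalloc_node() returns zeroed memory and INTEL_EXCL_UNUSED is the zero value of enum intel_excl_state_type, so a fresh allocation already reads as "unused" for every counter on both threads. The user-space sketch below is not the kernel code; the structures are trimmed to the fields relevant here and calloc() stands in for kzalloc_node(), but it illustrates the same reasoning.

/*
 * Minimal user-space sketch: a zeroed allocation already encodes
 * INTEL_EXCL_UNUSED, so no explicit per-counter init loop is needed.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define X86_PMC_IDX_MAX 64

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0,	/* counter is unused */
	INTEL_EXCL_SHARED    = 1,	/* counter usable by both HT threads */
	INTEL_EXCL_EXCLUSIVE = 2,	/* counter usable by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
};

struct intel_excl_cntrs {
	struct intel_excl_states states[2];
	int core_id;
};

static struct intel_excl_cntrs *allocate_excl_cntrs(void)
{
	/* calloc() stands in for kzalloc_node(): memory comes back zeroed */
	struct intel_excl_cntrs *c = calloc(1, sizeof(*c));

	if (c)
		c->core_id = -1;	/* only the non-zero field needs setting */
	return c;
}

int main(void)
{
	struct intel_excl_cntrs *c = allocate_excl_cntrs();
	int i, t;

	if (!c)
		return 1;

	/* every counter on both sibling threads starts out unused */
	for (t = 0; t < 2; t++)
		for (i = 0; i < X86_PMC_IDX_MAX; i++)
			assert(c->states[t].state[i] == INTEL_EXCL_UNUSED);

	printf("all %d counters start as INTEL_EXCL_UNUSED\n", X86_PMC_IDX_MAX);
	free(c);
	return 0;
}

The asserts pass because calloc(), like kzalloc_node(), hands back zero-filled memory, which is exactly the INTEL_EXCL_UNUSED encoding; the same argument lets the patch delete the loop without changing behavior.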