Commit 891aa1e0 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Five kernel fixes:

   - an mmap tracing ABI fix for certain mappings

   - a use-after-free fix, found via KASAN

   - three CPU hotplug related x86 PMU driver fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Make package handling more robust
  perf/x86/intel/uncore: Clean up hotplug conversion fallout
  perf/x86/intel/rapl: Make package handling more robust
  perf/core: Fix PERF_RECORD_MMAP2 prot/flags for anonymous memory
  perf/core: Fix use-after-free bug
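
The two "make package handling more robust" patches below share one guard: topology_logical_package_id() can return -1 for a CPU that has no entry in the topology map, and keeping the result in an unsigned variable lets a single bounds check reject both out-of-range ids and the -1 case. A minimal userspace sketch of that idea (illustrative names, not the kernel code):

    #include <stdio.h>

    #define MAX_PACKAGES 8u    /* stands in for rapl_pmus->maxpkg / max_packages */

    static const char *lookup(int logical_pkg_id)
    {
        /* -1 wraps to UINT_MAX, so the same compare that rejects
         * out-of-bounds ids also rejects "no mapping". */
        unsigned int pkgid = logical_pkg_id;

        return pkgid < MAX_PACKAGES ? "valid slot" : "NULL (rejected)";
    }

    int main(void)
    {
        printf("pkg  3 -> %s\n", lookup(3));    /* valid slot */
        printf("pkg -1 -> %s\n", lookup(-1));   /* NULL (rejected) */
        return 0;
    }
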
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
return rapl_pmus->pmus[topology_logical_package_id(cpu)];
unsigned int pkgid = topology_logical_package_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map.
*/
return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
/* must be done before validate_group */
pmu = cpu_to_rapl_pmu(event->cpu);
if (!pmu)
return -EINVAL;
event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = msr;
@@ -585,26 +593,7 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
*/
target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
cpumask_set_cpu(cpu, &rapl_cpu_mask);
pmu->cpu = cpu;
return 0;
}
static int rapl_cpu_prepare(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
if (pmu)
return 0;
if (!pmu) {
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -ENOMEM;
@@ -613,9 +602,21 @@ static int rapl_cpu_prepare(unsigned int cpu)
INIT_LIST_HEAD(&pmu->active_list);
pmu->pmu = &rapl_pmus->pmu;
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
pmu->cpu = -1;
rapl_hrtimer_init(pmu);
rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
}
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
*/
target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
cpumask_set_cpu(cpu, &rapl_cpu_mask);
pmu->cpu = cpu;
return 0;
}
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
/*
* Install callbacks. Core will call them for each online cpu.
*/
ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
rapl_cpu_prepare, NULL);
if (ret)
goto out;
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
"perf/x86/rapl:online",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
goto out1;
goto out;
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
goto out2;
goto out1;
rapl_advertise();
return 0;
out2:
cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out1:
cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
}
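
With the separate prepare state gone, a RAPL-style driver manages a single hotplug state and a single unwind step. A minimal sketch of that registration pattern (hypothetical module using a dynamic state id; the real driver uses CPUHP_AP_PERF_X86_RAPL_ONLINE and registers its PMU as well):

    #include <linux/cpuhotplug.h>
    #include <linux/module.h>

    static enum cpuhp_state demo_state;

    static int demo_cpu_online(unsigned int cpu)
    {
        /* Allocation and per-package setup can live here, which is
         * why a separate CPUHP_*_PREP state is no longer needed. */
        return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
        /* Drop references and migrate the event collector if needed. */
        return 0;
    }

    static int __init demo_init(void)
    {
        int ret;

        /* The online callback runs right away for every CPU that is
         * already up; a negative return value aborts the setup. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "perf/demo:online",
                                demo_cpu_online, demo_cpu_offline);
        if (ret < 0)
            return ret;
        demo_state = ret;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        /* Runs demo_cpu_offline() on all online CPUs, then frees the state. */
        cpuhp_remove_state(demo_state);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
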
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
return pmu->boxes[topology_logical_package_id(cpu)];
unsigned int pkgid = topology_logical_package_id(cpu);
/*
* The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map.
*/
return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
pmu->registered = false;
}
static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
struct intel_uncore_pmu *pmu = type->pmus;
struct intel_uncore_box *box;
int i, pkg;
if (pmu) {
pkg = topology_physical_package_id(cpu);
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box)
uncore_box_exit(box);
}
}
}
static void uncore_exit_boxes(void *dummy)
{
struct intel_uncore_type **types;
for (types = uncore_msr_uncores; *types; types++)
__uncore_exit_boxes(*types++, smp_processor_id());
}
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
}
}
static int uncore_cpu_dying(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg;
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
return 0;
}
static int first_init;
static int uncore_cpu_starting(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, ncpus = 1;
if (first_init) {
/*
* On init we get the number of online cpus in the package
* and set refcount for all of them.
*/
ncpus = cpumask_weight(topology_core_cpumask(cpu));
}
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (!box)
continue;
/* The first cpu on a package activates the box */
if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
uncore_box_init(box);
}
}
return 0;
}
static int uncore_cpu_prepare(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg;
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
if (pmu->boxes[pkg])
continue;
/* First cpu of a package allocates the box */
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
return -ENOMEM;
box->pmu = pmu;
box->pkgid = pkg;
pmu->boxes[pkg] = box;
}
}
return 0;
}
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
int new_cpu)
{
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
static int uncore_event_cpu_offline(unsigned int cpu)
{
int target;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
return 0;
goto unref;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
unref:
/* Clear the references */
pkg = topology_logical_package_id(cpu);
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box);
}
}
return 0;
}
static int allocate_boxes(struct intel_uncore_type **types,
unsigned int pkg, unsigned int cpu)
{
struct intel_uncore_box *box, *tmp;
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
LIST_HEAD(allocated);
int i;
/* Try to allocate all required boxes */
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
if (pmu->boxes[pkg])
continue;
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
goto cleanup;
box->pmu = pmu;
box->pkgid = pkg;
list_add(&box->active_list, &allocated);
}
}
/* Install them in the pmus */
list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list);
box->pmu->boxes[pkg] = box;
}
return 0;
cleanup:
list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list);
kfree(box);
}
return -ENOMEM;
}
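
allocate_boxes() above stages each newly allocated box on a local list and only publishes the pointers once every allocation has succeeded, so a mid-loop failure never leaves a package with a partially installed set of boxes. A simplified userspace sketch of the same all-or-nothing pattern (plain pointer array instead of a list_head, illustrative names):

    #include <stdlib.h>

    struct box { int id; };

    static int allocate_all(struct box **slots, int n)
    {
        struct box **staged;
        int i;

        staged = calloc(n, sizeof(*staged));
        if (!staged)
            return -1;

        /* Stage: allocate only the missing entries. */
        for (i = 0; i < n; i++) {
            if (slots[i])
                continue;
            staged[i] = malloc(sizeof(**staged));
            if (!staged[i])
                goto cleanup;
            staged[i]->id = i;
        }

        /* Commit: nothing can fail from here on. */
        for (i = 0; i < n; i++) {
            if (staged[i])
                slots[i] = staged[i];
        }
        free(staged);
        return 0;

    cleanup:
        for (i = 0; i < n; i++)
            free(staged[i]);    /* free(NULL) is a no-op */
        free(staged);
        return -1;
    }
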
static int uncore_event_cpu_online(unsigned int cpu)
{
int target;
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, ret, pkg, target;
pkg = topology_logical_package_id(cpu);
ret = allocate_boxes(types, pkg, cpu);
if (ret)
return ret;
for (; *types; types++) {
type = *types;
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
if (!box && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box);
}
}
/*
* Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;
/*
* Install callbacks. Core will call them for each online cpu.
*
* The first online cpu of each package allocates and takes
* the refcounts for all other online cpus in that package.
* If msrs are not enabled no allocation is required and
* uncore_cpu_prepare() is not called for each online cpu.
*/
if (!cret) {
ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
/* Install hotplug callbacks to setup the targets for each package */
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"perf/x86/intel/uncore:online",
uncore_event_cpu_online,
uncore_event_cpu_offline);
if (ret)
goto err;
} else {
cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
"perf/x86/intel/uncore:prepare",
uncore_cpu_prepare, NULL);
}
first_init = 1;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
"perf/x86/uncore:starting",
uncore_cpu_starting, uncore_cpu_dying);
first_init = 0;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"perf/x86/uncore:online",
uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;
err:
/* Undo box->init_box() */
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
}
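
Both hotplug callbacks above rely on a per-package reference count: the first CPU of a package to come online initializes the shared box (atomic_inc_return() == 1) and the last one to go offline tears it down (atomic_dec_return() == 0). A compressed userspace model of that lifetime rule (not the kernel code itself):

    #include <stdatomic.h>
    #include <stdio.h>

    struct pkg_box {
        atomic_int refcnt;
        int initialized;
    };

    static void cpu_online(struct pkg_box *box)
    {
        /* atomic_fetch_add() returns the old value; +1 gives the new one. */
        if (atomic_fetch_add(&box->refcnt, 1) + 1 == 1) {
            box->initialized = 1;           /* uncore_box_init() */
            printf("first CPU in package: init box\n");
        }
    }

    static void cpu_offline(struct pkg_box *box)
    {
        if (atomic_fetch_sub(&box->refcnt, 1) - 1 == 0) {
            box->initialized = 0;           /* uncore_box_exit() */
            printf("last CPU in package: exit box\n");
        }
    }

    int main(void)
    {
        struct pkg_box box = { .refcnt = 0, .initialized = 0 };

        cpu_online(&box);   /* first online: init */
        cpu_online(&box);   /* second online: refcount only */
        cpu_offline(&box);  /* refcount only */
        cpu_offline(&box);  /* last offline: exit */
        return 0;
    }
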
@@ -8,9 +8,7 @@ enum cpuhp_state {
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_X86_RAPL_PREP,
CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
lockdep_assert_held(&ctx->lock);
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
lockdep_assert_held(&event->ctx->lock);
/*
* We can have double attach due to group movement in perf_event_open.
*/
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
lockdep_assert_held(&event->ctx->lock);
/*
* We can have double detach due to exit/hot-unplug + close.
*/
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
*/
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{
lockdep_assert_held(&event->ctx->mutex);
struct perf_event_context *ctx = event->ctx;
lockdep_assert_held(&ctx->mutex);
event_function_call(event, __perf_remove_from_context, (void *)flags);
/*
* The above event_function_call() can NO-OP when it hits
* TASK_TOMBSTONE. In that case we must already have been detached
* from the context (by perf_event_exit_event()) but the grouping
* might still be in-tact.
*/
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
if ((flags & DETACH_GROUP) &&
(event->attach_state & PERF_ATTACH_GROUP)) {
/*
* Since in that case we cannot possibly be scheduled, simply
* detach now.
*/
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
raw_spin_unlock_irq(&ctx->lock);
}
}
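
Several hunks in this file also add or move lockdep_assert_held() annotations so that the locking rules (ctx->mutex for removal, ctx->lock for the group lists) are checked at runtime when lockdep is enabled. A minimal sketch of that annotation pattern (hypothetical code, not taken from this patch):

    #include <linux/lockdep.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_ctx {
        raw_spinlock_t lock;
        struct list_head items;
    };

    /* Callers must hold ctx->lock; with CONFIG_PROVE_LOCKING this becomes
     * a runtime warning on violation, otherwise it compiles away. */
    static void demo_add(struct demo_ctx *ctx, struct list_head *item)
    {
        lockdep_assert_held(&ctx->lock);
        list_add(item, &ctx->items);
    }
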
/*
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
char *buf = NULL;
char *name;
if (vma->vm_flags & VM_READ)
prot |= PROT_READ;
if (vma->vm_flags & VM_WRITE)
prot |= PROT_WRITE;
if (vma->vm_flags & VM_EXEC)
prot |= PROT_EXEC;
if (vma->vm_flags & VM_MAYSHARE)
flags = MAP_SHARED;
else
flags = MAP_PRIVATE;
if (vma->vm_flags & VM_DENYWRITE)
flags |= MAP_DENYWRITE;
if (vma->vm_flags & VM_MAYEXEC)
flags |= MAP_EXECUTABLE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
if (vma->vm_flags & VM_HUGETLB)
flags |= MAP_HUGETLB;
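/*
 * Note: this prot/flags translation used to sit inside the if (file)
 * branch further down, so PERF_RECORD_MMAP2 records for anonymous
 * mappings carried prot == 0 and flags == 0. Hoisting it in front of
 * the branch is the mmap tracing ABI fix mentioned in the pull request.
 */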
if (file) {
struct inode *inode;
dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
maj = MAJOR(dev);
min = MINOR(dev);
if (vma->vm_flags & VM_READ)
prot |= PROT_READ;
if (vma->vm_flags & VM_WRITE)
prot |= PROT_WRITE;
if (vma->vm_flags & VM_EXEC)
prot |= PROT_EXEC;
if (vma->vm_flags & VM_MAYSHARE)
flags = MAP_SHARED;
else
flags = MAP_PRIVATE;
if (vma->vm_flags & VM_DENYWRITE)
flags |= MAP_DENYWRITE;
if (vma->vm_flags & VM_MAYEXEC)
flags |= MAP_EXECUTABLE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
if (vma->vm_flags & VM_HUGETLB)
flags |= MAP_HUGETLB;
goto got_name;
} else {
if (vma->vm_ops && vma->vm_ops->name) {