diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index a1f6bc7f1e4c956cfd712bb644721de20ff30004..435864c24479876ababb616c958846a005227ad9 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		evtype[n] = group->hw.event_base;
 		current_idx[n++] = PMC_NO_INDEX;
 	}
-	list_for_each_entry(pe, &group->sibling_list, group_entry) {
+	list_for_each_entry(pe, &group->sibling_list, sibling_list) {
 		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
 				return -1;
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 5fb1d2254b5e2be241858c8709a3e533a594ddd6..27a9ca20933e697bf360657d4a74d7d2cf61b18d 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event)
 			return false;
 	}
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
 			return false;
 	}
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index 0a1e2280141f796e1fa62ed25352aa77606b2b95..3a89ea4c2b579c4e4a218ca03cbf39d156cd5680 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event)
 	else if (!is_software_event(leader))
 		return false;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (sibling->pmu == pmu)
 			num_hw++;
 		else if (!is_software_event(sibling))
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 6668f67a61c3a2bb09fc23d9e2172d7a738020e1..46097ff3208b0188bf09799c1c7998b0c9d4dc33 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event)
 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 			return -EINVAL;
 	}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index f89bbd54ecec54b777b987292e7dbb48ae138b57..7c1f66050433dcf0e5e760ab231480634030a302 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (event->pmu->task_ctx_nr == perf_hw_context &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 5d747b4cb8ee1803c93b2b72fa17450e102c7fda..94c2e63662c6fbbcf0c8da64678a14e8348c6211 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		ctrs[n] = group;
 		n++;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 5c1f54758312808bc9e2e70d1953bfa324f1e40b..a0a86d369119d8a169abc6d165a06385b4810ca1 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		events[n] = group->hw.event_base;
 		current_idx[n++] = PIC_NO_INDEX;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9c86e10f11966c289d9d7fc8a64dc600eaae1769..77a4125b6b1f06898ffda40f8c2f9f35f0f1a32c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	if (!dogrp)
 		return n;
 
-	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+	list_for_each_entry(event, &leader->sibling_list, sibling_list) {
 		if (!is_x86_event(event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7874c980d56921961328fa0212da9dae61458789..9e374cd22ad21242c0b9f75443d97f2dc4c58b5e 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 	if (!dogrp)
 		return n;
 
-	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+	list_for_each_entry(event, &leader->sibling_list, sibling_list) {
 		if (!is_box_event(box, event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 5426c04fe24bc88faa88a2cff0624ccbce277ebe..c98435bdb64fa2e61655a9babba90305664120b5 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event)
 	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index b52332e52ca5e9c9a9d4b4ede3667c3ec8066474..1c310a4be0003e9b830a950a4c6ff88e53900b20 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -847,7 +847,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			group_entry)
+			sibling_list)
 		if (sibling->pmu != event->pmu &&
 				!is_software_event(sibling))
 			return -EINVAL;
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 38f2cc2a6c7479c06078331f9944bffa794a6c9b..660680d78147c2d5dbf949775a5fa8be7a937a37 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event)
 	memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
 	if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
 		return false;
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
 			return false;
 	}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0c2ed11c0603015e10c3995f21a768801fe90427..628d7a7b95260662b97a02b8d43f7c497a8eadbe 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -311,7 +311,7 @@ validate_group(struct perf_event *event)
 	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 7ed24b95442226581a5385afd880bbc1fee3619e..e3356087fd767d47c2bd2dee44f5f961290f1974 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -82,8 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event)
 			counters++;
 	}
 
-	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry) {
+	list_for_each_entry(sibling, &event->group_leader->sibling_list, sibling_list) {
 		if (is_software_event(sibling))
 			continue;
 		if (sibling->pmu != event->pmu)
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 4fdc8486a8e44c6c627a3fae9875039fb868e226..5e535a71896562f7522adbf9ebb3f0372e5a3414 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -535,7 +535,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	}
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry)
+			    sibling_list)
 		if (sibling->pmu != event->pmu &&
 		    !is_software_event(sibling)) {
 			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -572,7 +572,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	}
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry) {
+			    sibling_list) {
 		if ((sibling != event) &&
 		    !is_software_event(sibling) &&
 		    (L2_EVT_GROUP(sibling->attr.config) ==
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 7f6b62b29e9d296d99a9bad77de4407354bd8b6f..5dedf4b1a552de698e922cf92f580e7dc36d9d93 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
 	counters = event_num_counters(event);
 	counters += event_num_counters(leader);
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (is_software_event(sibling))
 			continue;
 		if (sibling->pmu != event->pmu)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index eb23311bc70c042319909206fa4f015c7da4c905..f1f4a56cab5e36fd6115dfd0915039b20a2336a6 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -950,7 +950,7 @@ static int xgene_perf_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry)
+			    sibling_list)
 		if (sibling->pmu != event->pmu &&
 		    !is_software_event(sibling))
 			return -EINVAL;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6e3f854a34d85b19afc8079fd2dee29749a22a2a..84044ec21b316d257d46fdf5cc783cbfb94db2b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -549,14 +549,9 @@ struct perf_event {
 	struct list_head		event_entry;
 
 	/*
-	 * XXX: group_entry and sibling_list should be mutually exclusive;
-	 * either you're a sibling on a group, or you're the group leader.
-	 * Rework the code to always use the same list element.
-	 *
 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
 	 * either sufficies for read.
 	 */
-	struct list_head		group_entry;
 	struct list_head		sibling_list;
 	/*
 	 * Node on the pinned or flexible tree located at the event context;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2d8c0208ca4afc1847b5af01d2fb19d4b132c998..9a07bbe664510d1a8428b42621a281538feaf4ac 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader)
 {
 	struct perf_event *sibling;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry)
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list)
 		perf_event_update_time(sibling);
 }
 
@@ -1835,12 +1835,12 @@ static void perf_group_attach(struct perf_event *event)
 
 	group_leader->group_caps &= event->event_caps;
 
-	list_add_tail(&event->group_entry, &group_leader->sibling_list);
+	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
 	group_leader->nr_siblings++;
 
 	perf_event__header_size(group_leader);
 
-	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
+	list_for_each_entry(pos, &group_leader->sibling_list, sibling_list)
 		perf_event__header_size(pos);
 }
 
@@ -1904,7 +1904,7 @@ static void perf_group_detach(struct perf_event *event)
 	 * If this is a sibling, remove it from its group.
 	 */
 	if (event->group_leader != event) {
-		list_del_init(&event->group_entry);
+		list_del_init(&event->sibling_list);
 		event->group_leader->nr_siblings--;
 		goto out;
 	}
@@ -1914,7 +1914,7 @@ static void perf_group_detach(struct perf_event *event)
 	 * upgrade the siblings to singleton events by adding them
 	 * to whatever list we are on.
 	 */
-	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
+	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
 
 		sibling->group_leader = sibling;
 
@@ -1922,7 +1922,7 @@ static void perf_group_detach(struct perf_event *event)
 		sibling->group_caps = event->group_caps;
 
 		if (!RB_EMPTY_NODE(&event->group_node)) {
-			list_del_init(&sibling->group_entry);
+			list_del_init(&sibling->sibling_list);
 			add_event_to_groups(sibling, event->ctx);
 		}
 
@@ -1932,7 +1932,7 @@ static void perf_group_detach(struct perf_event *event)
 out:
 	perf_event__header_size(event->group_leader);
 
-	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
+	list_for_each_entry(tmp, &event->group_leader->sibling_list, sibling_list)
 		perf_event__header_size(tmp);
 }
 
@@ -1960,7 +1960,7 @@ static inline int pmu_filter_match(struct perf_event *event)
 	if (!__pmu_filter_match(event))
 		return 0;
 
-	list_for_each_entry(child, &event->sibling_list, group_entry) {
+	list_for_each_entry(child, &event->sibling_list, sibling_list) {
 		if (!__pmu_filter_match(child))
 			return 0;
 	}
@@ -2028,7 +2028,7 @@ group_sched_out(struct perf_event *group_event,
 	/*
	 * Schedule out siblings (if any):
	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry)
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list)
 		event_sched_out(event, cpuctx, ctx);
 
 	perf_pmu_enable(ctx->pmu);
@@ -2307,7 +2307,7 @@ group_sched_in(struct perf_event *group_event,
 	/*
	 * Schedule in siblings as one group (if any):
	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
 		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
@@ -2323,7 +2323,7 @@ group_sched_in(struct perf_event *group_event,
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally.
	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
 		if (event == partial_group)
 			break;
 
@@ -3796,7 +3796,7 @@ static void __perf_event_read(void *info)
 
 	pmu->read(event);
 
-	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+	list_for_each_entry(sub, &event->sibling_list, sibling_list) {
 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
 			/*
			 * Use sibling's PMU rather than @event's since
@@ -4642,7 +4642,7 @@ static int __perf_read_group_add(struct perf_event *leader,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
-	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
 		values[n++] += perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
@@ -4836,7 +4836,7 @@ static void perf_event_for_each(struct perf_event *event,
 	event = event->group_leader;
 
 	perf_event_for_each_child(event, func);
-	list_for_each_entry(sibling, &event->sibling_list, group_entry)
+	list_for_each_entry(sibling, &event->sibling_list, sibling_list)
 		perf_event_for_each_child(sibling, func);
 }
 
@@ -5995,7 +5995,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
 	__output_copy(handle, values, n * sizeof(u64));
 
-	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
 		n = 0;
 
 		if ((sub != event) &&
@@ -9813,7 +9813,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
-	INIT_LIST_HEAD(&event->group_entry);
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
 	init_event_group(event);
@@ -10581,7 +10580,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_ctx(gctx);
 
 		list_for_each_entry(sibling, &group_leader->sibling_list,
-				    group_entry) {
+				    sibling_list) {
 			perf_remove_from_context(sibling, 0);
 			put_ctx(gctx);
 		}
@@ -10603,7 +10602,7 @@ SYSCALL_DEFINE5(perf_event_open,
		 * reachable through the group lists.
		 */
 		list_for_each_entry(sibling, &group_leader->sibling_list,
-				    group_entry) {
+				    sibling_list) {
 			perf_event__state_init(sibling);
 			perf_install_in_context(ctx, sibling, sibling->cpu);
 			get_ctx(ctx);
@@ -11242,7 +11241,7 @@ static int inherit_group(struct perf_event *parent_event,
	 * case inherit_event() will create individual events, similar to what
	 * perf_group_detach() would do anyway.
	 */
-	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+	list_for_each_entry(sub, &parent_event->sibling_list, sibling_list) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					  child, leader, child_ctx);
 		if (IS_ERR(child_ctr))
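
--
Illustrative note (not part of the patch): the rename works because the two list
roles never overlap. A group leader's sibling_list member is the head of its
siblings list, while each sibling's sibling_list member is that sibling's node on
the leader's list; an event is either a leader or a sibling, so one list_head can
serve both purposes and the separate group_entry member becomes redundant. The
standalone C sketch below mimics this pattern with a made-up struct fake_event and
hand-rolled list helpers; it is not kernel code, only an illustration of the idea.

/* gcc -Wall -o sibling_list_demo sibling_list_demo.c */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal, hypothetical stand-in for struct perf_event after the rename. */
struct fake_event {
	const char *name;
	struct fake_event *group_leader;
	struct list_head sibling_list;	/* head for leaders, node for siblings */
};

static void attach_to_group(struct fake_event *event, struct fake_event *leader)
{
	event->group_leader = leader;
	/* The sibling's own sibling_list is its node on the leader's list. */
	list_add_tail(&event->sibling_list, &leader->sibling_list);
}

int main(void)
{
	struct fake_event leader = { .name = "cycles" };
	struct fake_event sib1 = { .name = "instructions" };
	struct fake_event sib2 = { .name = "branches" };
	struct list_head *pos;

	/* For the leader, sibling_list is the (initially empty) list head. */
	INIT_LIST_HEAD(&leader.sibling_list);
	leader.group_leader = &leader;

	INIT_LIST_HEAD(&sib1.sibling_list);
	INIT_LIST_HEAD(&sib2.sibling_list);
	attach_to_group(&sib1, &leader);
	attach_to_group(&sib2, &leader);

	/* Open-coded list_for_each_entry(ev, &leader->sibling_list, sibling_list). */
	for (pos = leader.sibling_list.next; pos != &leader.sibling_list; pos = pos->next) {
		struct fake_event *ev = container_of(pos, struct fake_event, sibling_list);
		printf("sibling of %s: %s\n", leader.name, ev->name);
	}
	return 0;
}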