Unverified commit a95251b0 authored by openeuler-ci-bot, committed by Gitee

!298 Perf tool: add Hip09 JSON support.

Merge Pull Request from: @hejunhao3 
 
Add Hip09 JSON support to the perf tool. With the new event and metric JSON files in place, `perf list` output looks like this:

```shell
...
uncore ddrc:
  act_cnt                                           
       [count of DDRC active commands. Unit: hisi_sccl,ddrc]
  cycles                                            
       [Unit: hisi_sccl,ddrc]
  ddrc_cycles                                       
       [Count of DDRC cycles. Unit: hisi_sccl,ddrc]
  flux_rd                                           
       [DDRC read commands. Unit: hisi_sccl,ddrc]
  flux_wr                                           
       [DDRC write commands. Unit: hisi_sccl,ddrc]
  pre_act                                           
       [count of DDRC pre-active commands. Unit: hisi_sccl,ddrc]
  priority_cmd                                      
       [count of DMC commands with the highest priority. Unit: hisi_sccl,ddrc]
  read_cmd                                          
       [count of DDRC read commands. Unit: hisi_sccl,ddrc]
  read_cmd_occupancy                                
       [count of cycles occupied by the read command in the queue. Unit: hisi_sccl,ddrc]
  write_cmd                                         
       [count of DDRC write commands. Unit: hisi_sccl,ddrc]

uncore sllc:
  cycles                                            
       [Unit: hisi_sccl,sllc]
  rx_data                                           
       [Count of the data received by SLLC. Unit: hisi_sccl,sllc]
  rx_data_sum                                       
       [total cycles SLLC taken to receive data. Unit: hisi_sccl,sllc]
  rx_req                                            
       [Count of the requests received by SLLC. Unit: hisi_sccl,sllc]
  rx_req_sum                                        
       [total cycles SLLC taken to receive requests. Unit: hisi_sccl,sllc]
  sllc_cycles                                       
       [Count of SLLC cycles. Unit: hisi_sccl,sllc]
  tx_data                                           
       [Count of the data transmitted by SLLC. Unit: hisi_sccl,sllc]
  retry_cpu                                         
       [Count of the number of retry that L3C suppresses the CPU operations. Unit: hisi_sccl,l3c]
  retry_ring                                        
       [Count of the number of retry that L3C suppresses the ring operations. Unit: hisi_sccl,l3c]
...
``` 
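For context, once perf picks up these aliases, the events listed above can be counted system-wide. A minimal sketch (not output from the patch), assuming the alias names resolve against the hisi_sccl*_ddrc* uncore PMU instances on the target machine:

```shell
# Count DDRC read/write flux for 5 seconds across all SCCLs.
# Event names are taken from the perf list output above; -a (system-wide)
# is needed because uncore PMUs are not per-task.
perf stat -a -e flux_rd,flux_wr -- sleep 5
```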
 
Link: https://gitee.com/openeuler/kernel/pulls/298 
Reviewed-by: Ling Mingqiang <lingmingqiang@huawei.com> 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -6,10 +6,13 @@ pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
JDIR_TEST = pmu-events/arch/test
JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
find $(JDIR_TEST) -name '*.json')
#
# Locate/process JSON files in pmu-events/arch/
# directory and create tables in pmu-events.c.
#
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS)
$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
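Not part of the patch, but for reference: after editing any JSON under pmu-events/arch/, the event tables are regenerated by an ordinary perf build from a kernel tree, roughly as follows (path assumed):

```shell
# Rebuild perf; the jevents generator re-creates pmu-events/pmu-events.c
# from the JSON files found under pmu-events/arch/.
make -C tools/perf
```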
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_ddrc.flux_wr",
"ConfigCode": "0x00",
"EventName": "flux_wr",
"BriefDescription": "DDRC total write operations",
"PublicDescription": "DDRC total write operations",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_ddrc.flux_rd",
"ConfigCode": "0x01",
"EventName": "flux_rd",
"BriefDescription": "DDRC total read operations",
"PublicDescription": "DDRC total read operations",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_ddrc.flux_wcmd",
"ConfigCode": "0x02",
"EventName": "flux_wcmd",
"BriefDescription": "DDRC write commands",
"PublicDescription": "DDRC write commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_ddrc.flux_rcmd",
"ConfigCode": "0x03",
"EventName": "flux_rcmd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x04",
"EventName": "uncore_hisi_ddrc.pre_cmd",
"ConfigCode": "0x04",
"EventName": "pre_cmd",
"BriefDescription": "DDRC precharge commands",
"PublicDescription": "DDRC precharge commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x05",
"EventName": "uncore_hisi_ddrc.act_cmd",
"ConfigCode": "0x05",
"EventName": "act_cmd",
"BriefDescription": "DDRC active commands",
"PublicDescription": "DDRC active commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x06",
"EventName": "uncore_hisi_ddrc.rnk_chg",
"ConfigCode": "0x06",
"EventName": "rnk_chg",
"BriefDescription": "DDRC rank commands",
"PublicDescription": "DDRC rank commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x07",
"EventName": "uncore_hisi_ddrc.rw_chg",
"ConfigCode": "0x07",
"EventName": "rw_chg",
"BriefDescription": "DDRC read and write changes",
"PublicDescription": "DDRC read and write changes",
"Unit": "hisi_sccl,ddrc"
......
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_hha.rx_ops_num",
"ConfigCode": "0x00",
"EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"PublicDescription": "The number of all operations received by the HHA",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_hha.rx_outer",
"ConfigCode": "0x01",
"EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"PublicDescription": "The number of all operations received by the HHA from another socket",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_hha.rx_sccl",
"ConfigCode": "0x02",
"EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_hha.rx_ccix",
"ConfigCode": "0x03",
"EventName": "rx_ccix",
"BriefDescription": "Count of the number of operations that HHA has received from CCIX",
"PublicDescription": "Count of the number of operations that HHA has received from CCIX",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1c",
"EventName": "uncore_hisi_hha.rd_ddr_64b",
"ConfigCode": "0x4",
"EventName": "rx_wbi",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x5",
"EventName": "rx_wbip",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x11",
"EventName": "rx_wtistash",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x1c",
"EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1d",
"EventName": "uncore_hisi_hha.wr_ddr_64b",
"ConfigCode": "0x1d",
"EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1e",
"EventName": "uncore_hisi_hha.rd_ddr_128b",
"ConfigCode": "0x1e",
"EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1f",
"EventName": "uncore_hisi_hha.wr_ddr_128b",
"ConfigCode": "0x1f",
"EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x20",
"EventName": "uncore_hisi_hha.spill_num",
"ConfigCode": "0x20",
"EventName": "spill_num",
"BriefDescription": "Count of the number of spill operations that the HHA has sent",
"PublicDescription": "Count of the number of spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x21",
"EventName": "uncore_hisi_hha.spill_success",
"ConfigCode": "0x21",
"EventName": "spill_success",
"BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
"PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x23",
"EventName": "bi_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x32",
"EventName": "mediated_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x33",
"EventName": "tx_snp_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x34",
"EventName": "tx_snp_outer",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x35",
"EventName": "tx_snp_ccix",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x38",
"EventName": "rx_snprspdata",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x3c",
"EventName": "rx_snprsp_outer",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x40",
"EventName": "sdir-lookup",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x41",
"EventName": "edir-lookup",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x42",
"EventName": "sdir-hit",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x43",
"EventName": "edir-hit",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4c",
"EventName": "sdir-home-migrate",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4d",
"EventName": "edir-home-migrate",
"Unit": "hisi_sccl,hha"
}
]
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_l3c.rd_cpipe",
"ConfigCode": "0x00",
"EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"PublicDescription": "Total read accesses",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_l3c.wr_cpipe",
"ConfigCode": "0x01",
"EventName": "wr_cpipe",
"BriefDescription": "Total write accesses",
"PublicDescription": "Total write accesses",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
"ConfigCode": "0x02",
"EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_l3c.wr_hit_cpipe",
"ConfigCode": "0x03",
"EventName": "wr_hit_cpipe",
"BriefDescription": "Total write hits",
"PublicDescription": "Total write hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x04",
"EventName": "uncore_hisi_l3c.victim_num",
"ConfigCode": "0x04",
"EventName": "victim_num",
"BriefDescription": "l3c precharge commands",
"PublicDescription": "l3c precharge commands",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x20",
"EventName": "uncore_hisi_l3c.rd_spipe",
"ConfigCode": "0x20",
"EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x21",
"EventName": "uncore_hisi_l3c.wr_spipe",
"ConfigCode": "0x21",
"EventName": "wr_spipe",
"BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x22",
"EventName": "uncore_hisi_l3c.rd_hit_spipe",
"ConfigCode": "0x22",
"EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x23",
"EventName": "uncore_hisi_l3c.wr_hit_spipe",
"ConfigCode": "0x23",
"EventName": "wr_hit_spipe",
"BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x29",
"EventName": "uncore_hisi_l3c.back_invalid",
"ConfigCode": "0x29",
"EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"PublicDescription": "Count of the number of L3C back invalid operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x40",
"EventName": "uncore_hisi_l3c.retry_cpu",
"ConfigCode": "0x40",
"EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x41",
"EventName": "uncore_hisi_l3c.retry_ring",
"ConfigCode": "0x41",
"EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x42",
"EventName": "uncore_hisi_l3c.prefetch_drop",
"ConfigCode": "0x42",
"EventName": "prefetch_drop",
"BriefDescription": "Count of the number of prefetch drops from this L3C",
"PublicDescription": "Count of the number of prefetch drops from this L3C",
"Unit": "hisi_sccl,l3c"
......
[
{
"EventCode": "0x00",
"EventName": "ddrc_cycles",
"BriefDescription": "Count of DDRC cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x01",
"EventName": "act_cnt",
"BriefDescription": "count of DDRC active commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x07",
"EventName": "priority_cmd",
"BriefDescription": "count of DMC commands with the highest priority",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x0b",
"EventName": "pre_act",
"BriefDescription": "count of DDRC pre-active commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x41",
"EventName": "read_cmd",
"BriefDescription": "count of DDRC read commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x44",
"EventName": "write_cmd",
"BriefDescription": "count of DDRC write commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x80",
"EventName": "read_cmd_occupancy",
"BriefDescription": "count of cycles occupied by the read command in the queue",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x81",
"EventName": "write_cmd_occupancy",
"BriefDescription": "count of cycles occupied by the write command in the queue",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x83",
"EventName": "flux_wr",
"BriefDescription": "DDRC write commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x84",
"EventName": "flux_rd",
"BriefDescription": "DDRC read commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "flux_wr * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC memory write(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw_write",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "flux_rd * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC memory read(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw_read",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "(flux_wr + flux_rd) * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC (including memory read and write)(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "read_cmd_occupancy / read_cmd",
"BriefDescription": "Average delay of DDRC read command scheduling",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_read_lat",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "write_cmd_occupancy / write_cmd",
"BriefDescription": "Average delay of DDRC write command scheduling",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_write_lat",
"Unit": "hisi_sccl,ddrc"
}
]
\ No newline at end of file
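The MetricExpr/MetricName entries above are consumed by perf's metric support. A sketch of how they could be exercised, assuming a Hip09 system where these metric definitions are installed:

```shell
# Report DDRC bandwidth and command-scheduling latency using the metric
# names defined in the JSON above (ddrc_bw, ddrc_read_lat, ddrc_write_lat).
perf stat -a -M ddrc_bw,ddrc_read_lat,ddrc_write_lat -- sleep 5
```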
[
{
"EventCode": "0x00",
"EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x01",
"EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x02",
"EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1c",
"EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1d",
"EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1e",
"EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1f",
"EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x2e",
"EventName": "hha_retry",
"BriefDescription": "Count of the HHA retry",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x55",
"EventName": "hha_cycles",
"BriefDescription": "Count of the HHA cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "(rd_ddr_64b * 64 + rd_ddr_128b *128) / duration_time",
"BriefDescription": "Average bandwidth of reading DDRC(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "hha_read_ddrc_bw",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "(wr_ddr_64b * 64 + wr_ddr_128b *128) / duration_time",
"BriefDescription": "Average bandwidth of writing DDRC(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "hha_write_ddrc_bw",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "rx_outer / rx_ops_num",
"BriefDescription": "Rate of cross-chip operations received by HHA",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "cross_chip_ops_rate",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "rx_sccl / rx_ops_num",
"BriefDescription": "Rate of cross-die operations received by HHA",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "cross_die_ops_rate",
"Unit": "hisi_sccl,hha"
}
]
\ No newline at end of file
[
{
"EventCode": "0x00",
"EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x02",
"EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x20",
"EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x22",
"EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x29",
"EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x40",
"EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x41",
"EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x48",
"EventName": "l3c_hit",
"BriefDescription": "Count of the total number of L3C hit",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x80",
"EventName": "read_sum",
"Filter": "tt_core=0xff",
"BriefDescription": "total time taken by CPU to read L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xb8",
"EventName": "l3c_ref",
"BriefDescription": "Count of the total number of CPU accessed L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xb9",
"EventName": "dat_access",
"BriefDescription": "Count of the total number of CPU accessed L3C",
"Filter": "tt_core=0xff",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xc0",
"EventName": "read_alloc",
"Filter": "tt_core=0xff",
"BriefDescription": "Count of the requests issued by CPU received by L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xce",
"EventName": "retry_alloc",
"Filter": "tt_core=0xff",
"BriefDescription": "Count of the L3C retry CPU",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x7f",
"EventName": "l3c_cycles",
"BriefDescription": "Count of L3C cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"MetricExpr": "l3c_hit / l3c_ref",
"BriefDescription": "hit rate of L3C",
"Compat": "0x00000030",
"MetricGroup": "L3Cache",
"MetricName": "l3c_hit_rate",
"Unit": "hisi_sccl,l3c"
},
{
"MetricExpr": "read_sum / (l3c_ref - retry_alloc)",
"BriefDescription": "Average latency of CPU reading L3",
"Compat": "0x00000030",
"MetricGroup": "L3Cache",
"MetricName": "l3c_latency",
"Unit": "hisi_sccl,l3c"
}
]
\ No newline at end of file
[
{
"EventCode": "0x78",
"EventName": "pa_cycles",
"BriefDescription": "Count of PA cycles",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventName": "cycle",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x40",
"EventName": "rx_req",
"BriefDescription": "Count of the requests received by PA link0",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x41",
"EventName": "rx_req_link1",
"BriefDescription": "Count of the requests received by PA link1",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x42",
"EventName": "rx_req_link2",
"BriefDescription": "Count of the requests received by PA link2",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x43",
"EventName": "rx_req_link3",
"BriefDescription": "Count of the requests received by PA link3",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5c",
"EventName": "tx_req",
"BriefDescription": "Count of the requests transmitted by PA link0",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5d",
"EventName": "tx_req_link1",
"BriefDescription": "Count of the requests transmitted by PA link1",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5e",
"EventName": "tx_req_link2",
"BriefDescription": "Count of the requests transmitted by PA link2",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5f",
"EventName": "tx_req_link3",
"BriefDescription": "Count of the requests transmitted by PA link3",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"MetricExpr": "(rx_req + rx_req_link1 + rx_req_link2 + rx_req_link3) / duration_time",
"BriefDescription": "Average bandwidth of PA receive requests",
"Compat": "0x00000030",
"MetricGroup": "PA",
"MetricName": "pa_rx_req_bw",
"Unit": "hisi_sicl,pa"
},
{
"MetricExpr": "(tx_req + tx_req_link1 + tx_req_link2 + tx_req_link3) / duration_time",
"BriefDescription": "Average bandwidth of PA transmitted requests",
"Compat": "0x00000030",
"MetricGroup": "PA",
"MetricName": "pa_tx_req_bw",
"Unit": "hisi_sicl,pa"
}
]
\ No newline at end of file
[
{
"EventCode": "0x09",
"EventName": "sllc_cycles",
"BriefDescription": "Count of SLLC cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x20",
"EventName": "rx_req_sum",
"BriefDescription": "total cycles SLLC taken to receive requests",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x21",
"EventName": "rx_data_sum",
"BriefDescription": "total cycles SLLC taken to receive data",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x24",
"EventName": "tx_req_sum",
"BriefDescription": "total cycles SLLC taken to transmit requests",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x25",
"EventName": "tx_data_sum",
"BriefDescription": "total cycles SLLC taken to transmit data",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x30",
"EventName": "rx_req",
"BriefDescription": "Count of the requests received by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x31",
"EventName": "rx_data",
"BriefDescription": "Count of the data received by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x34",
"EventName": "tx_req",
"BriefDescription": "Count of the requests transmitted by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x35",
"EventName": "tx_data",
"BriefDescription": "Count of the data transmitted by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_req_sum / rx_req",
"BriefDescription": "Average latency of SLLC receive requests(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_req_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_data_sum / rx_data",
"BriefDescription": "Average latency of SLLC receive data(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_data_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_req_sum / tx_req",
"BriefDescription": "Average latency of SLLC transmit requests(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_req_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_data_sum / tx_data",
"BriefDescription": "Average latency of SLLC transmit data(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_data_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_req * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC receive requests(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_req_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_data * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC receive data(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_data_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_req * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC transmit requests(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_req_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_data * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC transmit data(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_data_bw",
"Unit": "hisi_sccl,sllc"
}
]
\ No newline at end of file
[
{
"PublicDescription": "Attributable Level 3 cache access, read",
"EventCode": "0x40",
"EventName": "L3_CACHE_RD",
"BriefDescription": "L3 cache access, read"
}
]
[
{
"ArchStdEvent": "L3_CACHE_RD"
}
]
\ No newline at end of file
......@@ -17,5 +17,26 @@
"CounterMask": "0",
"Invert": "0",
"EdgeDetect": "0"
},
{
"EventCode": "0x7",
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x12",
"EventName": "uncore_imc_free_running.cache_miss",
"BriefDescription": "Total cache misses",
"PublicDescription": "Total cache misses",
"Unit": "imc_free_running"
},
{
"EventCode": "0x34",
"EventName": "uncore_imc.cache_hits",
"BriefDescription": "Total cache hits",
"PublicDescription": "Total cache hits",
"Unit": "imc"
}
]
[
{
"BriefDescription": "ddr write-cycles event",
"EventCode": "0x2b",
"EventName": "sys_ddr_pmu.write_cycles",
"Unit": "sys_ddr_pmu",
"Compat": "v8"
},
{
"EventCode": "0x84",
"EventName": "sys_hisi_ddrc.flux_rd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "sys_hisi_ddrc",
"Compat": "hip09"
}
]
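These system (SoC-scope) test events are matched via the Compat string against the PMU identifier rather than the CPU id. As an illustrative sketch only (the exact resolution depends on the uncore_sys_hisi_ddrc* PMU exposed by the running kernel):

```shell
# Count the SoC-level DDRC read-command alias; the "hip09" Compat string is
# matched against the identifier of the uncore_sys_hisi_ddrc PMU.
perf stat -a -e sys_hisi_ddrc.flux_rd -- sleep 5
```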
......@@ -55,6 +55,7 @@ char *prog;
struct json_event {
char *name;
char *compat;
char *event;
char *desc;
char *long_desc;
......@@ -82,6 +83,23 @@ enum aggr_mode_class convert(const char *aggr_mode)
typedef int (*func)(void *data, struct json_event *je);
static LIST_HEAD(sys_event_tables);
struct sys_event_table {
struct list_head list;
char *soc_id;
};
static void free_sys_event_tables(void)
{
struct sys_event_table *et, *next;
list_for_each_entry_safe(et, next, &sys_event_tables, list) {
free(et->soc_id);
free(et);
}
}
int eprintf(int level, int var, const char *fmt, ...)
{
......@@ -263,6 +281,7 @@ static struct map {
{ "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
{ "hisi_sccl,hha", "hisi_sccl,hha" },
{ "hisi_sccl,l3c", "hisi_sccl,l3c" },
{ "hisi_sccl,sllc", "hisi_sccl,sllc" },
{ "L3PMC", "amd_l3" },
{ "DFPMC", "amd_df" },
{}
......@@ -360,6 +379,8 @@ static int print_events_table_entry(void *data, struct json_event *je)
if (je->event)
fprintf(outfp, "\t.event = \"%s\",\n", je->event);
fprintf(outfp, "\t.desc = \"%s\",\n", je->desc);
if (je->compat)
fprintf(outfp, "\t.compat = \"%s\",\n", je->compat);
fprintf(outfp, "\t.topic = \"%s\",\n", topic);
if (je->long_desc && je->long_desc[0])
fprintf(outfp, "\t.long_desc = \"%s\",\n", je->long_desc);
......@@ -390,6 +411,7 @@ struct event_struct {
struct list_head list;
char *name;
char *event;
char *compat;
char *desc;
char *long_desc;
char *pmu;
......@@ -550,10 +572,12 @@ static int json_events(const char *fn,
struct json_event je = {};
char *arch_std = NULL;
unsigned long long eventcode = 0;
unsigned long long configcode = 0;
struct msrmap *msr = NULL;
jsmntok_t *msrval = NULL;
jsmntok_t *precise = NULL;
jsmntok_t *obj = tok++;
bool configcode_present = false;
EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
for (j = 0; j < obj->size; j += 2) {
......@@ -576,6 +600,12 @@ static int json_events(const char *fn,
addfield(map, &code, "", "", val);
eventcode |= strtoul(code, NULL, 0);
free(code);
} else if (json_streq(map, field, "ConfigCode")) {
char *code = NULL;
addfield(map, &code, "", "", val);
configcode |= strtoul(code, NULL, 0);
free(code);
configcode_present = true;
} else if (json_streq(map, field, "ExtSel")) {
char *code = NULL;
addfield(map, &code, "", "", val);
......@@ -583,6 +613,8 @@ static int json_events(const char *fn,
free(code);
} else if (json_streq(map, field, "EventName")) {
addfield(map, &je.name, "", "", val);
} else if (json_streq(map, field, "Compat")) {
addfield(map, &je.compat, "", "", val);
} else if (json_streq(map, field, "BriefDescription")) {
addfield(map, &je.desc, "", "", val);
fixdesc(je.desc);
......@@ -655,7 +687,10 @@ static int json_events(const char *fn,
addfield(map, &extra_desc, " ",
"(Precise event)", NULL);
}
snprintf(buf, sizeof buf, "event=%#llx", eventcode);
if (configcode_present)
snprintf(buf, sizeof buf, "config=%#llx", configcode);
else
snprintf(buf, sizeof buf, "event=%#llx", eventcode);
addfield(map, &event, ",", buf, NULL);
if (je.desc && extra_desc)
addfield(map, &je.desc, " ", extra_desc, NULL);
......@@ -683,6 +718,7 @@ static int json_events(const char *fn,
free(event);
free(je.desc);
free(je.name);
free(je.compat);
free(je.long_desc);
free(extra_desc);
free(je.pmu);
......@@ -747,6 +783,15 @@ static char *file_name_to_table_name(char *fname)
return tblname;
}
static bool is_sys_dir(char *fname)
{
size_t len = strlen(fname), len2 = strlen("/sys");
if (len2 > len)
return false;
return !strcmp(fname+len-len2, "/sys");
}
static void print_mapping_table_prefix(FILE *outfp)
{
fprintf(outfp, "struct pmu_events_map pmu_events_map[] = {\n");
......@@ -777,10 +822,38 @@ static void print_mapping_test_table(FILE *outfp)
fprintf(outfp, "\t.cpuid = \"testcpu\",\n");
fprintf(outfp, "\t.version = \"v1\",\n");
fprintf(outfp, "\t.type = \"core\",\n");
fprintf(outfp, "\t.table = pme_test_cpu,\n");
fprintf(outfp, "\t.table = pme_test_soc_cpu,\n");
fprintf(outfp, "},\n");
}
static void print_system_event_mapping_table_prefix(FILE *outfp)
{
fprintf(outfp, "\nstruct pmu_sys_events pmu_sys_event_tables[] = {");
}
static void print_system_event_mapping_table_suffix(FILE *outfp)
{
fprintf(outfp, "\n\t{\n\t\t.table = 0\n\t},");
fprintf(outfp, "\n};\n");
}
static int process_system_event_tables(FILE *outfp)
{
struct sys_event_table *sys_event_table;
print_system_event_mapping_table_prefix(outfp);
list_for_each_entry(sys_event_table, &sys_event_tables, list) {
fprintf(outfp, "\n\t{\n\t\t.table = %s,\n\t\t.name = \"%s\",\n\t},",
sys_event_table->soc_id,
sys_event_table->soc_id);
}
print_system_event_mapping_table_suffix(outfp);
return 0;
}
static int process_mapfile(FILE *outfp, char *fpath)
{
int n = 16384;
......@@ -886,6 +959,8 @@ static void create_empty_mapping(const char *output_file)
fprintf(outfp, "#include \"pmu-events/pmu-events.h\"\n");
print_mapping_table_prefix(outfp);
print_mapping_table_suffix(outfp);
print_system_event_mapping_table_prefix(outfp);
print_system_event_mapping_table_suffix(outfp);
fclose(outfp);
}
......@@ -978,15 +1053,20 @@ static int process_one_file(const char *fpath, const struct stat *sb,
int level = ftwbuf->level;
int err = 0;
if (level == 2 && is_dir) {
if (level >= 2 && is_dir) {
int count = 0;
/*
* For level 2 directory, bname will include parent name,
* like vendor/platform. So search back from platform dir
* to find this.
* Something similar for level 3 directory, but we're a PMU
* category folder, like vendor/platform/cpu.
*/
bname = (char *) fpath + ftwbuf->base - 2;
for (;;) {
if (*bname == '/')
count++;
if (count == level - 1)
break;
bname--;
}
......@@ -999,13 +1079,13 @@ static int process_one_file(const char *fpath, const struct stat *sb,
level, sb->st_size, bname, fpath);
/* base dir or too deep */
if (level == 0 || level > 3)
if (level == 0 || level > 4)
return 0;
/* model directory, reset topic */
if ((level == 1 && is_dir && is_leaf_dir(fpath)) ||
(level == 2 && is_dir)) {
(level >= 2 && is_dir && is_leaf_dir(fpath))) {
if (close_table)
print_events_table_suffix(eventsfp);
......@@ -1021,6 +1101,22 @@ static int process_one_file(const char *fpath, const struct stat *sb,
return -1;
}
if (is_sys_dir(bname)) {
struct sys_event_table *sys_event_table;
sys_event_table = malloc(sizeof(*sys_event_table));
if (!sys_event_table)
return -1;
sys_event_table->soc_id = strdup(tblname);
if (!sys_event_table->soc_id) {
free(sys_event_table);
return -1;
}
list_add_tail(&sys_event_table->list,
&sys_event_tables);
}
print_events_table_prefix(eventsfp, tblname);
return 0;
}
......@@ -1162,6 +1258,10 @@ int main(int argc, char *argv[])
sprintf(ldirname, "%s/test", start_dirname);
rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
if (rc)
goto err_processing_std_arch_event_dir;
rc = nftw(ldirname, process_one_file, maxfds, 0);
if (rc)
goto err_processing_dir;
......@@ -1176,10 +1276,16 @@ int main(int argc, char *argv[])
}
rc = process_mapfile(eventsfp, mapfile);
fclose(eventsfp);
if (rc) {
pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
/* Make build fail */
ret = 1;
goto err_close_eventsfp;
}
rc = process_system_event_tables(eventsfp);
fclose(eventsfp);
if (rc) {
ret = 1;
goto err_out;
}
......
......@@ -12,6 +12,7 @@ enum aggr_mode_class {
*/
struct pmu_event {
const char *name;
const char *compat;
const char *event;
const char *desc;
const char *topic;
......@@ -43,10 +44,16 @@ struct pmu_events_map {
struct pmu_event *table;
};
struct pmu_sys_events {
const char *name;
struct pmu_event *table;
};
/*
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
extern struct pmu_events_map pmu_events_map[];
extern struct pmu_sys_events pmu_sys_event_tables[];
#endif
......@@ -605,7 +605,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 10);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
/* config1 */
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -614,7 +614,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 1);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config1"));
/* config2=3 */
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -623,7 +623,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 3);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config2"));
/* umask=1*/
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -661,7 +661,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
return 0;
}
......
......@@ -26,99 +26,206 @@ struct perf_pmu_test_event {
* be set in the alias.
*/
const char *alias_long_desc;
/* PMU which we should match against */
const char *matching_pmu;
};
static struct perf_pmu_test_event test_cpu_events[] = {
{
.event = {
.name = "bp_l1_btb_correct",
.event = "event=0x8a",
.desc = "L1 BTB Correction",
.topic = "branch",
},
.alias_str = "event=0x8a",
.alias_long_desc = "L1 BTB Correction",
struct perf_pmu_test_pmu {
struct perf_pmu pmu;
struct perf_pmu_test_event const *aliases[10];
};
static const struct perf_pmu_test_event bp_l1_btb_correct = {
.event = {
.name = "bp_l1_btb_correct",
.event = "event=0x8a",
.desc = "L1 BTB Correction",
.topic = "branch",
},
{
.event = {
.name = "bp_l2_btb_correct",
.event = "event=0x8b",
.desc = "L2 BTB Correction",
.topic = "branch",
},
.alias_str = "event=0x8b",
.alias_long_desc = "L2 BTB Correction",
.alias_str = "event=0x8a",
.alias_long_desc = "L1 BTB Correction",
};
static const struct perf_pmu_test_event bp_l2_btb_correct = {
.event = {
.name = "bp_l2_btb_correct",
.event = "event=0x8b",
.desc = "L2 BTB Correction",
.topic = "branch",
},
{
.event = {
.name = "segment_reg_loads.any",
.event = "umask=0x80,period=200000,event=0x6",
.desc = "Number of segment register loads",
.topic = "other",
},
.alias_str = "umask=0x80,(null)=0x30d40,event=0x6",
.alias_long_desc = "Number of segment register loads",
.alias_str = "event=0x8b",
.alias_long_desc = "L2 BTB Correction",
};
static const struct perf_pmu_test_event segment_reg_loads_any = {
.event = {
.name = "segment_reg_loads.any",
.event = "umask=0x80,period=200000,event=0x6",
.desc = "Number of segment register loads",
.topic = "other",
},
{
.event = {
.name = "dispatch_blocked.any",
.event = "umask=0x20,period=200000,event=0x9",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
.alias_str = "umask=0x20,(null)=0x30d40,event=0x9",
.alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
.alias_str = "umask=0x80,period=0x30d40,event=0x6",
.alias_long_desc = "Number of segment register loads",
};
static const struct perf_pmu_test_event dispatch_blocked_any = {
.event = {
.name = "dispatch_blocked.any",
.event = "umask=0x20,period=200000,event=0x9",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
{
.event = {
.name = "eist_trans",
.event = "umask=0x0,period=200000,event=0x3a",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
.alias_str = "umask=0,(null)=0x30d40,event=0x3a",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.alias_str = "umask=0x20,period=0x30d40,event=0x9",
.alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
};
static const struct perf_pmu_test_event eist_trans = {
.event = {
.name = "eist_trans",
.event = "umask=0x0,period=200000,event=0x3a",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
{ /* sentinel */
.event = {
.name = NULL,
},
.alias_str = "umask=0,period=0x30d40,event=0x3a",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
static const struct perf_pmu_test_event l3_cache_rd = {
.event = {
.name = "l3_cache_rd",
.event = "event=0x40",
.desc = "L3 cache access, read",
.long_desc = "Attributable Level 3 cache access, read",
.topic = "cache",
},
.alias_str = "event=0x40",
.alias_long_desc = "Attributable Level 3 cache access, read",
};
static struct perf_pmu_test_event test_uncore_events[] = {
{
.event = {
.name = "uncore_hisi_ddrc.flux_wcmd",
.event = "event=0x2",
.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
.topic = "uncore",
.long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
},
.alias_str = "event=0x2",
.alias_long_desc = "DDRC write commands",
static const struct perf_pmu_test_event *core_events[] = {
&bp_l1_btb_correct,
&bp_l2_btb_correct,
&segment_reg_loads_any,
&dispatch_blocked_any,
&eist_trans,
&l3_cache_rd,
NULL
};
static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
.event = {
.name = "uncore_hisi_ddrc.flux_wcmd",
.event = "event=0x2",
.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
.topic = "uncore",
.long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
},
{
.event = {
.name = "unc_cbo_xsnp_response.miss_eviction",
.event = "umask=0x81,event=0x22",
.desc = "Unit: uncore_cbox A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
.alias_str = "umask=0x81,event=0x22",
.alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.alias_str = "event=0x2",
.alias_long_desc = "DDRC write commands",
.matching_pmu = "hisi_sccl1_ddrc2",
};
static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
.event = {
.name = "unc_cbo_xsnp_response.miss_eviction",
.event = "umask=0x81,event=0x22",
.desc = "Unit: uncore_cbox A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
{ /* sentinel */
.event = {
.name = NULL,
},
}
.alias_str = "umask=0x81,event=0x22",
.alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.matching_pmu = "uncore_cbox_0",
};
static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = {
.name = "uncore_hisi_l3c.rd_hit_cpipe",
.event = "event=0x7",
.desc = "Total read hits. Unit: hisi_sccl,l3c ",
.topic = "uncore",
.long_desc = "Total read hits",
.pmu = "hisi_sccl,l3c",
},
.alias_str = "event=0x7",
.alias_long_desc = "Total read hits",
.matching_pmu = "hisi_sccl3_l3c7",
};
static const struct perf_pmu_test_event uncore_imc_free_running_cache_miss = {
.event = {
.name = "uncore_imc_free_running.cache_miss",
.event = "event=0x12",
.desc = "Total cache misses. Unit: uncore_imc_free_running ",
.topic = "uncore",
.long_desc = "Total cache misses",
.pmu = "uncore_imc_free_running",
},
.alias_str = "event=0x12",
.alias_long_desc = "Total cache misses",
.matching_pmu = "uncore_imc_free_running_0",
};
static const struct perf_pmu_test_event uncore_imc_cache_hits = {
.event = {
.name = "uncore_imc.cache_hits",
.event = "event=0x34",
.desc = "Total cache hits. Unit: uncore_imc ",
.topic = "uncore",
.long_desc = "Total cache hits",
.pmu = "uncore_imc",
},
.alias_str = "event=0x34",
.alias_long_desc = "Total cache hits",
.matching_pmu = "uncore_imc_0",
};
const int total_test_events_size = ARRAY_SIZE(test_uncore_events);
static const struct perf_pmu_test_event *uncore_events[] = {
&uncore_hisi_ddrc_flux_wcmd,
&unc_cbo_xsnp_response_miss_eviction,
&uncore_hisi_l3c_rd_hit_cpipe,
&uncore_imc_free_running_cache_miss,
&uncore_imc_cache_hits,
NULL
};
static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
.event = {
.name = "sys_ddr_pmu.write_cycles",
.event = "event=0x2b",
.desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
.topic = "uncore",
.pmu = "uncore_sys_ddr_pmu",
.compat = "v8",
},
.alias_str = "event=0x2b",
.alias_long_desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
.matching_pmu = "uncore_sys_ddr_pmu",
};
static const struct perf_pmu_test_event sys_hisi_ddrc_flux_rd = {
.event = {
.name = "sys_hisi_ddrc.flux_rd",
.event = "event=0x84",
.desc = "DDRC read commands. Unit: uncore_sys_hisi_ddrc ",
.topic = "uncore",
.long_desc = "DDRC read commands",
.pmu = "uncore_sys_hisi_ddrc",
.compat = "hip09",
},
.alias_str = "event=0x84",
.alias_long_desc = "DDRC read commands",
.matching_pmu = "uncore_sys_hisi_ddrc_0",
};
static const struct perf_pmu_test_event *sys_events[] = {
&sys_ddr_pmu_write_cycles,
&sys_hisi_ddrc_flux_rd,
NULL
};
static bool is_same(const char *reference, const char *test)
{
......@@ -148,99 +255,237 @@ static struct pmu_events_map *__test_pmu_get_events_map(void)
return NULL;
}
/* Verify generated events from pmu-events.c is as expected */
static struct pmu_event *__test_pmu_get_sys_events_table(void)
{
struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
for ( ; tables->name; tables++) {
if (!strcmp("pme_test_soc_sys", tables->name))
return tables->table;
}
return NULL;
}
static int compare_pmu_events(struct pmu_event *e1, const struct pmu_event *e2)
{
if (!is_same(e1->name, e2->name)) {
pr_debug2("testing event e1 %s: mismatched name string, %s vs %s\n",
e1->name, e1->name, e2->name);
return -1;
}
if (!is_same(e1->compat, e2->compat)) {
pr_debug2("testing event e1 %s: mismatched compat string, %s vs %s\n",
e1->name, e1->compat, e2->compat);
return -1;
}
if (!is_same(e1->event, e2->event)) {
pr_debug2("testing event e1 %s: mismatched event, %s vs %s\n",
e1->name, e1->event, e2->event);
return -1;
}
if (!is_same(e1->desc, e2->desc)) {
pr_debug2("testing event e1 %s: mismatched desc, %s vs %s\n",
e1->name, e1->desc, e2->desc);
return -1;
}
if (!is_same(e1->topic, e2->topic)) {
pr_debug2("testing event e1 %s: mismatched topic, %s vs %s\n",
e1->name, e1->topic, e2->topic);
return -1;
}
if (!is_same(e1->long_desc, e2->long_desc)) {
pr_debug2("testing event e1 %s: mismatched long_desc, %s vs %s\n",
e1->name, e1->long_desc, e2->long_desc);
return -1;
}
if (!is_same(e1->pmu, e2->pmu)) {
pr_debug2("testing event e1 %s: mismatched pmu string, %s vs %s\n",
e1->name, e1->pmu, e2->pmu);
return -1;
}
if (!is_same(e1->unit, e2->unit)) {
pr_debug2("testing event e1 %s: mismatched unit, %s vs %s\n",
e1->name, e1->unit, e2->unit);
return -1;
}
if (!is_same(e1->perpkg, e2->perpkg)) {
pr_debug2("testing event e1 %s: mismatched perpkg, %s vs %s\n",
e1->name, e1->perpkg, e2->perpkg);
return -1;
}
if (!is_same(e1->aggr_mode, e2->aggr_mode)) {
pr_debug2("testing event e1 %s: mismatched aggr_mode, %s vs %s\n",
e1->name, e1->aggr_mode, e2->aggr_mode);
return -1;
}
if (!is_same(e1->metric_expr, e2->metric_expr)) {
pr_debug2("testing event e1 %s: mismatched metric_expr, %s vs %s\n",
e1->name, e1->metric_expr, e2->metric_expr);
return -1;
}
if (!is_same(e1->metric_name, e2->metric_name)) {
pr_debug2("testing event e1 %s: mismatched metric_name, %s vs %s\n",
e1->name, e1->metric_name, e2->metric_name);
return -1;
}
if (!is_same(e1->metric_group, e2->metric_group)) {
pr_debug2("testing event e1 %s: mismatched metric_group, %s vs %s\n",
e1->name, e1->metric_group, e2->metric_group);
return -1;
}
if (!is_same(e1->deprecated, e2->deprecated)) {
pr_debug2("testing event e1 %s: mismatched deprecated, %s vs %s\n",
e1->name, e1->deprecated, e2->deprecated);
return -1;
}
if (!is_same(e1->metric_constraint, e2->metric_constraint)) {
pr_debug2("testing event e1 %s: mismatched metric_constant, %s vs %s\n",
e1->name, e1->metric_constraint, e2->metric_constraint);
return -1;
}
return 0;
}
static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
struct perf_pmu_test_event const *test_event,
char const *pmu_name)
{
struct pmu_event const *event = &test_event->event;
/* An alias was found, ensure everything is in order */
if (!is_same(alias->name, event->name)) {
pr_debug("testing aliases PMU %s: mismatched name, %s vs %s\n",
pmu_name, alias->name, event->name);
return -1;
}
if (!is_same(alias->desc, event->desc)) {
pr_debug("testing aliases PMU %s: mismatched desc, %s vs %s\n",
pmu_name, alias->desc, event->desc);
return -1;
}
if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
pr_debug("testing aliases PMU %s: mismatched long_desc, %s vs %s\n",
pmu_name, alias->long_desc,
test_event->alias_long_desc);
return -1;
}
if (!is_same(alias->topic, event->topic)) {
pr_debug("testing aliases PMU %s: mismatched topic, %s vs %s\n",
pmu_name, alias->topic, event->topic);
return -1;
}
if (!is_same(alias->str, test_event->alias_str)) {
pr_debug("testing aliases PMU %s: mismatched str, %s vs %s\n",
pmu_name, alias->str, test_event->alias_str);
return -1;
}
if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
pr_debug("testing aliases PMU %s: mismatched long desc, %s vs %s\n",
pmu_name, alias->str, test_event->alias_long_desc);
return -1;
}
if (!is_same(alias->pmu_name, test_event->event.pmu)) {
pr_debug("testing aliases PMU %s: mismatched pmu_name, %s vs %s\n",
pmu_name, alias->pmu_name, test_event->event.pmu);
return -1;
}
return 0;
}
/* Verify generated events from pmu-events.c are as expected */
static int test_pmu_event_table(void)
{
struct pmu_event *sys_event_tables = __test_pmu_get_sys_events_table();
struct pmu_events_map *map = __test_pmu_get_events_map();
struct pmu_event *table;
int map_events = 0, expected_events;
/* ignore 2x sentinels */
expected_events = ARRAY_SIZE(test_cpu_events) +
ARRAY_SIZE(test_uncore_events) - 2;
/* ignore 3x sentinels */
expected_events = ARRAY_SIZE(core_events) +
ARRAY_SIZE(uncore_events) +
ARRAY_SIZE(sys_events) - 3;
if (!map)
if (!map || !sys_event_tables)
return -1;
for (table = map->table; table->name; table++) {
struct perf_pmu_test_event *test;
struct pmu_event *te;
struct perf_pmu_test_event const **test_event_table;
bool found = false;
if (table->pmu)
test = &test_uncore_events[0];
test_event_table = &uncore_events[0];
else
test = &test_cpu_events[0];
test_event_table = &core_events[0];
te = &test->event;
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
struct pmu_event const *event = &test_event->event;
for (; te->name; test++, te = &test->event) {
if (strcmp(table->name, te->name))
if (strcmp(table->name, event->name))
continue;
found = true;
map_events++;
if (!is_same(table->desc, te->desc)) {
pr_debug2("testing event table %s: mismatched desc, %s vs %s\n",
table->name, table->desc, te->desc);
if (compare_pmu_events(table, event))
return -1;
}
if (!is_same(table->topic, te->topic)) {
pr_debug2("testing event table %s: mismatched topic, %s vs %s\n",
table->name, table->topic,
te->topic);
return -1;
}
pr_debug("testing event table %s: pass\n", table->name);
}
if (!is_same(table->long_desc, te->long_desc)) {
pr_debug2("testing event table %s: mismatched long_desc, %s vs %s\n",
table->name, table->long_desc,
te->long_desc);
return -1;
}
if (!found) {
pr_err("testing event table: could not find event %s\n",
table->name);
return -1;
}
}
if (!is_same(table->unit, te->unit)) {
pr_debug2("testing event table %s: mismatched unit, %s vs %s\n",
table->name, table->unit,
te->unit);
return -1;
}
for (table = sys_event_tables; table->name; table++) {
struct perf_pmu_test_event const **test_event_table;
bool found = false;
if (!is_same(table->perpkg, te->perpkg)) {
pr_debug2("testing event table %s: mismatched perpkg, %s vs %s\n",
table->name, table->perpkg,
te->perpkg);
return -1;
}
test_event_table = &sys_events[0];
if (!is_same(table->metric_expr, te->metric_expr)) {
pr_debug2("testing event table %s: mismatched metric_expr, %s vs %s\n",
table->name, table->metric_expr,
te->metric_expr);
return -1;
}
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
struct pmu_event const *event = &test_event->event;
if (!is_same(table->metric_name, te->metric_name)) {
pr_debug2("testing event table %s: mismatched metric_name, %s vs %s\n",
table->name, table->metric_name,
te->metric_name);
return -1;
}
if (strcmp(table->name, event->name))
continue;
found = true;
map_events++;
if (!is_same(table->deprecated, te->deprecated)) {
pr_debug2("testing event table %s: mismatched deprecated, %s vs %s\n",
table->name, table->deprecated,
te->deprecated);
if (compare_pmu_events(table, event))
return -1;
}
pr_debug("testing event table %s: pass\n", table->name);
pr_debug("testing sys event table %s: pass\n", table->name);
}
if (!found) {
pr_err("testing event table: could not find event %s\n",
table->name);
pr_debug("testing event table: could not find event %s\n",
table->name);
return -1;
}
}
......@@ -266,27 +511,19 @@ static struct perf_pmu_alias *find_alias(const char *test_event, struct list_hea
}
/* Verify aliases are as expected */
static int __test__pmu_event_aliases(char *pmu_name, int *count)
static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
{
struct perf_pmu_test_event *test;
struct pmu_event *te;
struct perf_pmu_test_event const **test_event_table;
struct perf_pmu *pmu;
LIST_HEAD(aliases);
int res = 0;
bool use_uncore_table;
struct pmu_events_map *map = __test_pmu_get_events_map();
struct perf_pmu_alias *a, *tmp;
if (!map)
return -1;
if (is_pmu_core(pmu_name)) {
test = &test_cpu_events[0];
use_uncore_table = false;
} else {
test = &test_uncore_events[0];
use_uncore_table = true;
}
test_event_table = &core_events[0];
pmu = zalloc(sizeof(*pmu));
if (!pmu)
......@@ -296,90 +533,212 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
pmu_add_cpu_aliases_map(&aliases, pmu, map);
for (te = &test->event; te->name; test++, te = &test->event) {
struct perf_pmu_alias *alias = find_alias(te->name, &aliases);
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
struct pmu_event const *event = &test_event->event;
struct perf_pmu_alias *alias = find_alias(event->name, &aliases);
if (!alias) {
bool uncore_match = pmu_uncore_alias_match(pmu_name,
te->pmu);
if (use_uncore_table && !uncore_match) {
pr_debug3("testing aliases PMU %s: skip matching alias %s\n",
pmu_name, te->name);
continue;
}
pr_debug2("testing aliases PMU %s: no alias, alias_table->name=%s\n",
pmu_name, te->name);
pr_debug("testing aliases core PMU %s: no alias, alias_table->name=%s\n",
pmu_name, event->name);
res = -1;
break;
}
if (!is_same(alias->desc, te->desc)) {
pr_debug2("testing aliases PMU %s: mismatched desc, %s vs %s\n",
pmu_name, alias->desc, te->desc);
if (compare_alias_to_test_event(alias, test_event, pmu_name)) {
res = -1;
break;
}
if (!is_same(alias->long_desc, test->alias_long_desc)) {
pr_debug2("testing aliases PMU %s: mismatched long_desc, %s vs %s\n",
pmu_name, alias->long_desc,
test->alias_long_desc);
res = -1;
break;
}
(*count)++;
pr_debug2("testing aliases core PMU %s: matched event %s\n",
pmu_name, alias->name);
}
if (!is_same(alias->str, test->alias_str)) {
pr_debug2("testing aliases PMU %s: mismatched str, %s vs %s\n",
pmu_name, alias->str, test->alias_str);
res = -1;
break;
list_for_each_entry_safe(a, tmp, &aliases, list) {
list_del(&a->list);
perf_pmu_free_alias(a);
}
free(pmu);
return res;
}
static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
{
int alias_count = 0, to_match_count = 0, matched_count = 0;
struct perf_pmu_test_event const **table;
struct perf_pmu *pmu = &test_pmu->pmu;
const char *pmu_name = pmu->name;
struct perf_pmu_alias *a, *tmp, *alias;
struct pmu_events_map *map;
LIST_HEAD(aliases);
int res = 0;
map = __test_pmu_get_events_map();
if (!map)
return -1;
pmu_add_cpu_aliases_map(&aliases, pmu, map);
pmu_add_sys_aliases(&aliases, pmu);
/* Count how many aliases we generated */
list_for_each_entry(alias, &aliases, list)
alias_count++;
/* Count how many aliases we expect from the known table */
for (table = &test_pmu->aliases[0]; *table; table++)
to_match_count++;
if (alias_count != to_match_count) {
pr_debug("testing aliases uncore PMU %s: mismatch expected aliases (%d) vs found (%d)\n",
pmu_name, to_match_count, alias_count);
res = -1;
goto out;
}
list_for_each_entry(alias, &aliases, list) {
bool matched = false;
for (table = &test_pmu->aliases[0]; *table; table++) {
struct perf_pmu_test_event const *test_event = *table;
struct pmu_event const *event = &test_event->event;
if (!strcmp(event->name, alias->name)) {
if (compare_alias_to_test_event(alias,
test_event,
pmu_name)) {
continue;
}
matched = true;
matched_count++;
}
}
if (!is_same(alias->topic, te->topic)) {
pr_debug2("testing aliases PMU %s: mismatched topic, %s vs %s\n",
pmu_name, alias->topic, te->topic);
if (matched == false) {
pr_debug("testing aliases uncore PMU %s: could not match alias %s\n",
pmu_name, alias->name);
res = -1;
break;
goto out;
}
}
(*count)++;
pr_debug2("testing aliases PMU %s: matched event %s\n",
pmu_name, alias->name);
if (alias_count != matched_count) {
pr_debug("testing aliases uncore PMU %s: mismatch found aliases (%d) vs matched (%d)\n",
pmu_name, matched_count, alias_count);
res = -1;
}
out:
list_for_each_entry_safe(a, tmp, &aliases, list) {
list_del(&a->list);
perf_pmu_free_alias(a);
}
free(pmu);
return res;
}
static struct perf_pmu_test_pmu test_pmus[] = {
{
.pmu = {
.name = (char *)"hisi_sccl1_ddrc2",
.is_uncore = 1,
},
.aliases = {
&uncore_hisi_ddrc_flux_wcmd,
},
},
{
.pmu = {
.name = (char *)"uncore_cbox_0",
.is_uncore = 1,
},
.aliases = {
&unc_cbo_xsnp_response_miss_eviction,
},
},
{
.pmu = {
.name = (char *)"hisi_sccl3_l3c7",
.is_uncore = 1,
},
.aliases = {
&uncore_hisi_l3c_rd_hit_cpipe,
},
},
{
.pmu = {
.name = (char *)"uncore_imc_free_running_0",
.is_uncore = 1,
},
.aliases = {
&uncore_imc_free_running_cache_miss,
},
},
{
.pmu = {
.name = (char *)"uncore_imc_0",
.is_uncore = 1,
},
.aliases = {
&uncore_imc_cache_hits,
},
},
{
.pmu = {
.name = (char *)"uncore_sys_ddr_pmu0",
.is_uncore = 1,
.id = (char *)"v8",
},
.aliases = {
&sys_ddr_pmu_write_cycles,
},
},
{
.pmu = {
.name = (char *)"uncore_sys_hisi_ddrc_0",
.is_uncore = 1,
.id = (char *)"hip09",
},
.aliases = {
&sys_hisi_ddrc_flux_rd,
},
},
};
/* Test that aliases generated are as expected */
static int test_aliases(void)
{
struct perf_pmu *pmu = NULL;
unsigned long i;
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
int count = 0;
if (!is_pmu_core(pmu->name))
continue;
if (list_empty(&pmu->format)) {
pr_debug2("skipping testing PMU %s\n", pmu->name);
pr_debug2("skipping testing core PMU %s\n", pmu->name);
continue;
}
if (__test__pmu_event_aliases(pmu->name, &count)) {
pr_debug("testing PMU %s aliases: failed\n", pmu->name);
if (__test_core_pmu_event_aliases(pmu->name, &count)) {
pr_debug("testing core PMU %s aliases: failed\n", pmu->name);
return -1;
}
if (count == 0)
pr_debug3("testing PMU %s aliases: no events to match\n",
if (count == 0) {
pr_debug("testing core PMU %s aliases: no events to match\n",
pmu->name);
else
pr_debug("testing PMU %s aliases: pass\n", pmu->name);
return -1;
}
pr_debug("testing core PMU %s aliases: pass\n", pmu->name);
}
for (i = 0; i < ARRAY_SIZE(test_pmus); i++) {
int res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
if (res)
return res;
}
return 0;
......
......@@ -180,11 +180,22 @@ void evlist__remove(struct evlist *evlist, struct evsel *evsel)
void perf_evlist__splice_list_tail(struct evlist *evlist,
struct list_head *list)
{
struct evsel *evsel, *temp;
while (!list_empty(list)) {
struct evsel *evsel, *temp, *leader = NULL;
__evlist__for_each_entry_safe(list, temp, evsel) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
__evlist__for_each_entry_safe(list, temp, evsel) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
leader = evsel;
break;
}
__evlist__for_each_entry_safe(list, temp, evsel) {
if (evsel->leader == leader) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
}
}
}
}
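The reworked splice keeps each event group contiguous: the first remaining entry is moved over and treated as the leader, then every entry whose ->leader points at it is moved right behind it, and the outer loop repeats until the source list is empty. Below is a rough standalone sketch of that regrouping on plain arrays; struct item, regroup() and the sample event names are invented for illustration and are not perf code.

```c
#include <stdio.h>

struct item {
    const char *name;
    struct item *leader;   /* group leader; a leader points at itself */
    int moved;
};

/* Move items from src[] into out[] so each leader is followed by its members. */
static int regroup(struct item *src, int n, struct item **out)
{
    int produced = 0;

    for (int i = 0; i < n; i++) {
        struct item *leader;

        if (src[i].moved)
            continue;
        /* first pass: the next unmoved item becomes the current leader */
        leader = &src[i];
        leader->moved = 1;
        out[produced++] = leader;
        /* second pass: pull in everything belonging to this leader */
        for (int j = 0; j < n; j++) {
            if (!src[j].moved && src[j].leader == leader) {
                src[j].moved = 1;
                out[produced++] = &src[j];
            }
        }
    }
    return produced;
}

int main(void)
{
    struct item ev[4] = {
        { .name = "cycles" }, { .name = "instructions" },
        { .name = "cache-misses" }, { .name = "branches" },
    };
    struct item *out[4];

    /* two groups: {cycles, cache-misses} and {instructions, branches} */
    ev[0].leader = &ev[0]; ev[2].leader = &ev[0];
    ev[1].leader = &ev[1]; ev[3].leader = &ev[1];

    int n = regroup(ev, 4, out);
    for (int i = 0; i < n; i++)
        printf("%s\n", out[i]->name);  /* cycles cache-misses instructions branches */
    return 0;
}
```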
......
......@@ -279,7 +279,9 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
* when the group is left.
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader)
ev->leader != metric_events[i]->leader &&
!strcmp(ev->leader->pmu_name,
metric_events[i]->leader->pmu_name))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
......@@ -413,6 +415,12 @@ static bool match_metric(const char *n, const char *list)
return false;
}
static bool match_pe_metric(struct pmu_event *pe, const char *metric)
{
return match_metric(pe->metric_group, metric) ||
match_metric(pe->metric_name, metric);
}
struct mep {
struct rb_node nd;
const char *name;
......@@ -491,6 +499,115 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
putchar('\n');
}
static int metricgroup__print_pmu_event(struct pmu_event *pe,
bool metricgroups, char *filter,
bool raw, bool details,
struct rblist *groups,
struct strlist *metriclist)
{
const char *g;
char *omg, *mg;
g = pe->metric_group;
if (!g && pe->metric_name) {
if (pe->name)
return 0;
g = "No_group";
}
if (!g)
return 0;
mg = strdup(g);
if (!mg)
return -ENOMEM;
omg = mg;
while ((g = strsep(&mg, ";")) != NULL) {
struct mep *me;
char *s;
g = skip_spaces(g);
if (*g == 0)
g = "No_group";
if (filter && !strstr(g, filter))
continue;
if (raw)
s = (char *)pe->metric_name;
else {
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return -1;
if (details) {
if (asprintf(&s, "%s\n%*s%s]",
s, 8, "[", pe->metric_expr) < 0)
return -1;
}
}
if (!s)
continue;
if (!metricgroups) {
strlist__add(metriclist, s);
} else {
me = mep_lookup(groups, g);
if (!me)
continue;
strlist__add(me->metrics, s);
}
if (!raw)
free(s);
}
free(omg);
return 0;
}
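For reference, metricgroup__print_pmu_event() files one entry per semicolon-separated group name, trimming leading spaces and falling back to "No_group" for empty tokens. A minimal standalone sketch of just that splitting step; the sample group string is invented and isspace() stands in for the kernel's skip_spaces():

```c
#define _DEFAULT_SOURCE     /* for strsep() and strdup() on glibc */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* A metric may belong to several groups, separated by ';'. */
    char *mg = strdup("TopDownL1; Memory_BW;");
    char *omg = mg, *g;

    while ((g = strsep(&mg, ";")) != NULL) {
        while (isspace((unsigned char)*g))
            g++;
        if (*g == '\0')
            g = "No_group";       /* empty token, as after the trailing ';' */
        printf("group: %s\n", g);
    }
    free(omg);                    /* strsep() advanced mg, so free the saved head */
    return 0;
}
```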
struct metricgroup_print_sys_idata {
struct strlist *metriclist;
char *filter;
struct rblist *groups;
bool metricgroups;
bool raw;
bool details;
};
typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);
struct metricgroup_iter_data {
metricgroup_sys_event_iter_fn fn;
void *data;
};
static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
{
struct metricgroup_iter_data *d = data;
struct perf_pmu *pmu = NULL;
if (!pe->metric_expr || !pe->compat)
return 0;
while ((pmu = perf_pmu__scan(pmu))) {
if (!pmu->id || strcmp(pmu->id, pe->compat))
continue;
return d->fn(pe, d->data);
}
return 0;
}
static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
{
struct metricgroup_print_sys_idata *d = data;
return metricgroup__print_pmu_event(pe, d->metricgroups, d->filter, d->raw,
d->details, d->groups, d->metriclist);
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details)
{
......@@ -501,9 +618,6 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
struct rb_node *node, *next;
struct strlist *metriclist = NULL;
if (!map)
return;
if (!metricgroups) {
metriclist = strlist__new(NULL, NULL);
if (!metriclist)
......@@ -514,67 +628,33 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
groups.node_new = mep_new;
groups.node_cmp = mep_cmp;
groups.node_delete = mep_delete;
for (i = 0; ; i++) {
const char *g;
for (i = 0; map; i++) {
pe = &map->table[i];
if (!pe->name && !pe->metric_group && !pe->metric_name)
break;
if (!pe->metric_expr)
continue;
g = pe->metric_group;
if (!g && pe->metric_name) {
if (pe->name)
continue;
g = "No_group";
}
if (g) {
char *omg;
char *mg = strdup(g);
if (!mg)
return;
omg = mg;
while ((g = strsep(&mg, ";")) != NULL) {
struct mep *me;
char *s;
g = skip_spaces(g);
if (*g == 0)
g = "No_group";
if (filter && !strstr(g, filter))
continue;
if (raw)
s = (char *)pe->metric_name;
else {
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
if (details) {
if (asprintf(&s, "%s\n%*s%s]",
s, 8, "[", pe->metric_expr) < 0)
return;
}
}
if (!s)
continue;
if (metricgroup__print_pmu_event(pe, metricgroups, filter,
raw, details, &groups,
metriclist) < 0)
return;
}
if (!metricgroups) {
strlist__add(metriclist, s);
} else {
me = mep_lookup(&groups, g);
if (!me)
continue;
strlist__add(me->metrics, s);
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__print_sys_event_iter,
.data = (void *) &(struct metricgroup_print_sys_idata){
.metriclist = metriclist,
.metricgroups = metricgroups,
.filter = filter,
.raw = raw,
.details = details,
.groups = &groups,
},
};
if (!raw)
free(s);
}
free(omg);
}
pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
}
if (!filter || !rblist__empty(&groups)) {
......@@ -683,6 +763,16 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
return 1;
}
struct metricgroup_add_iter_data {
struct list_head *metric_list;
const char *metric;
struct metric **m;
struct expr_ids *ids;
int *ret;
bool *has_match;
bool metric_no_group;
};
static int __add_metric(struct list_head *metric_list,
struct pmu_event *pe,
bool metric_no_group,
......@@ -792,10 +882,11 @@ static int __add_metric(struct list_head *metric_list,
return 0;
}
#define map_for_each_event(__pe, __idx, __map) \
for (__idx = 0, __pe = &__map->table[__idx]; \
__pe->name || __pe->metric_group || __pe->metric_name; \
__pe = &__map->table[++__idx])
#define map_for_each_event(__pe, __idx, __map) \
if (__map) \
for (__idx = 0, __pe = &__map->table[__idx]; \
__pe->name || __pe->metric_group || __pe->metric_name; \
__pe = &__map->table[++__idx])
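With the NULL-map early returns dropped from metricgroup__print() and metricgroup__parse_groups() elsewhere in this change, the iteration macro now has to tolerate a NULL map itself, which the added if (__map) guard provides. A minimal standalone sketch of the same guard-in-macro pattern, using invented demo types rather than the real pmu_event/pmu_events_map structures:

```c
#include <stdio.h>

struct demo_event { const char *name; };
struct demo_map { const struct demo_event *table; };

/* Iterate table entries until the empty sentinel; do nothing if the map is NULL. */
#define demo_for_each_event(__pe, __idx, __map)                 \
    if (__map)                                                  \
        for (__idx = 0, __pe = &(__map)->table[__idx];          \
             __pe->name;                                        \
             __pe = &(__map)->table[++__idx])

int main(void)
{
    const struct demo_event table[] = { { "flux_wr" }, { "flux_rd" }, { NULL } };
    struct demo_map m = { .table = table };
    struct demo_map *map = &m;
    const struct demo_event *pe;
    int i;

    demo_for_each_event(pe, i, map)
        printf("%s\n", pe->name);

    map = NULL;                 /* with a NULL map the loop body never runs */
    demo_for_each_event(pe, i, map)
        printf("%s\n", pe->name);

    return 0;
}
```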
#define map_for_each_metric(__pe, __idx, __map, __metric) \
map_for_each_event(__pe, __idx, __map) \
......@@ -964,6 +1055,29 @@ static int add_metric(struct list_head *metric_list,
return ret;
}
static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
void *data)
{
struct metricgroup_add_iter_data *d = data;
int ret;
if (!match_pe_metric(pe, d->metric))
return 0;
ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
if (ret)
return ret;
ret = resolve_metric(d->metric_no_group,
d->metric_list, NULL, d->ids);
if (ret)
return ret;
*(d->has_match) = true;
return *d->ret;
}
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
struct strbuf *events,
struct list_head *metric_list,
......@@ -994,6 +1108,22 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
goto out;
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__add_metric_sys_event_iter,
.data = (void *) &(struct metricgroup_add_iter_data) {
.metric_list = &list,
.metric = metric,
.metric_no_group = metric_no_group,
.m = &m,
.ids = &ids,
.has_match = &has_match,
.ret = &ret,
},
};
pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
}
/* End of pmu events. */
if (!has_match) {
ret = -EINVAL;
......@@ -1120,9 +1250,6 @@ int metricgroup__parse_groups(const struct option *opt,
struct evlist *perf_evlist = *(struct evlist **)opt->value;
struct pmu_events_map *map = pmu_events_map__find();
if (!map)
return 0;
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, map);
}
......
......@@ -2967,7 +2967,7 @@ int parse_events_term__num(struct parse_events_term **term,
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = type_term,
.config = config,
.config = config ? : strdup(config_term_names[type_term]),
.no_value = no_value,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
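The new .config initializer above uses the GNU C conditional with an omitted middle operand: config ? : strdup(...) keeps config when it is non-NULL and otherwise duplicates the canonical term name as a fallback. A tiny standalone illustration of that operator; the array contents here are invented:

```c
#include <stdio.h>

int main(void)
{
    const char *term_names[] = { "config", "config1", "period" };
    const char *config = NULL;

    /* GNU extension: "a ? : b" yields a when a is non-zero/non-NULL, else b. */
    const char *effective = config ? : term_names[2];

    printf("%s\n", effective);   /* prints "period" */
    return 0;
}
```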
......
......@@ -312,7 +312,7 @@ PE_NAME opt_pmu_config
if (!strncmp(name, "uncore_", 7) &&
strncmp($1, "uncore_", 7))
name += 7;
if (!fnmatch(pattern, name, 0)) {
if (!perf_pmu__match(pattern, name, $1)) {
if (parse_events_copy_term_list(orig_terms, &terms))
CLEANUP_YYABORT;
if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
......
......@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/ctype.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <errno.h>
......@@ -17,6 +18,7 @@
#include <locale.h>
#include <regex.h>
#include <perf/cpumap.h>
#include <fnmatch.h>
#include "debug.h"
#include "evsel.h"
#include "pmu.h"
......@@ -283,6 +285,7 @@ void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->str);
zfree(&newalias->metric_expr);
zfree(&newalias->metric_name);
zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
}
......@@ -297,6 +300,10 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
list_for_each_entry(a, alist, list) {
if (!strcasecmp(newalias->name, a->name)) {
if (newalias->pmu_name && a->pmu_name &&
!strcasecmp(newalias->pmu_name, a->pmu_name)) {
continue;
}
perf_pmu_update_alias(a, newalias);
perf_pmu_free_alias(newalias);
return true;
......@@ -382,6 +389,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
}
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
alias->str = strdup(newval);
alias->pmu_name = metric_name ? strdup(metric_name) : NULL;
if (deprecated)
alias->deprecated = true;
......@@ -597,6 +605,7 @@ static struct perf_cpu_map *__pmu_cpumask(const char *path)
* Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
* may have a "cpus" file.
*/
#define SYS_TEMPLATE_ID "./bus/event_source/devices/%s/identifier"
#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
......@@ -635,6 +644,21 @@ static bool pmu_is_uncore(const char *name)
return file_available(path);
}
static char *pmu_id(const char *name)
{
char path[PATH_MAX], *str;
size_t len;
snprintf(path, PATH_MAX, SYS_TEMPLATE_ID, name);
if (sysfs__read_str(path, &str, &len) < 0)
return NULL;
str[len - 1] = 0; /* remove line feed */
return str;
}
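pmu_id() above reads the per-PMU "identifier" attribute that some uncore drivers expose under sysfs; pmu_lookup() later stores the string in pmu->id so it can be compared against pe->compat when adding sys aliases. A hedged standalone sketch that does the same read with plain stdio instead of perf's sysfs__read_str(); read_pmu_id() and the example PMU name are assumptions for illustration:

```c
#include <stdio.h>
#include <string.h>

/* Read /sys/bus/event_source/devices/<pmu>/identifier, if the driver provides it. */
static int read_pmu_id(const char *pmu, char *buf, int len)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path),
             "/sys/bus/event_source/devices/%s/identifier", pmu);
    f = fopen(path, "r");
    if (!f)
        return -1;               /* most core PMUs have no identifier file */
    if (!fgets(buf, len, f)) {
        fclose(f);
        return -1;
    }
    fclose(f);
    buf[strcspn(buf, "\n")] = '\0';   /* strip the trailing newline, like pmu_id() */
    return 0;
}

int main(void)
{
    char id[64];

    /* "uncore_sys_hisi_ddrc_0" is only an example name; use a PMU present on your system. */
    if (!read_pmu_id("uncore_sys_hisi_ddrc_0", id, sizeof(id)))
        printf("identifier: %s\n", id);
    return 0;
}
```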
/*
* PMU CORE devices have different name other than cpu in sysfs on some
* platforms.
......@@ -701,6 +725,35 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
/*
* Suffix must be in form tok_{digits}, or tok{digits}, or same as pmu_name
* to be valid.
*/
static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok)
{
const char *p;
if (strncmp(pmu_name, tok, strlen(tok)))
return false;
p = pmu_name + strlen(tok);
if (*p == 0)
return true;
if (*p == '_')
++p;
/* Ensure we end in a number */
while (1) {
if (!isdigit(*p) && (*p != '_'))
return false;
if (*(++p) == 0)
break;
}
return true;
}
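For illustration, a minimal standalone re-implementation of the suffix rule documented above; check_suffix() and the sample strings are invented for this sketch. By the time the caller applies the check, name has already been advanced to the token position, so testing "ddrc2" against the token "ddrc" mirrors matching the sysfs PMU "hisi_sccl1_ddrc2" against an alias unit such as "hisi_sccl,ddrc".

```c
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same rule as perf_pmu__valid_suffix(): "tok", "tok<digits>" or "tok_<digits>". */
static bool check_suffix(const char *pmu_name, const char *tok)
{
    const char *p;

    if (strncmp(pmu_name, tok, strlen(tok)))
        return false;

    p = pmu_name + strlen(tok);
    if (*p == 0)
        return true;
    if (*p == '_')
        ++p;

    /* whatever remains must be digits (or further '_' separators) */
    while (1) {
        if (!isdigit((unsigned char)*p) && *p != '_')
            return false;
        if (*(++p) == 0)
            break;
    }
    return true;
}

int main(void)
{
    printf("%d\n", check_suffix("ddrc2", "ddrc"));   /* 1: trailing digits */
    printf("%d\n", check_suffix("ddrc_2", "ddrc"));  /* 1: '_' then digits */
    printf("%d\n", check_suffix("ddrc", "ddrc"));    /* 1: exact match */
    printf("%d\n", check_suffix("ddrcx", "ddrc"));   /* 0: non-numeric suffix */
    return 0;
}
```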
struct pmu_events_map *__weak pmu_events_map__find(void)
{
return perf_pmu__find_map(NULL);
......@@ -732,12 +785,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
* match "socket" in "socketX_pmunameY" and then "pmuname" in
* "pmunameY".
*/
for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) {
while (1) {
char *next_tok = strtok_r(NULL, ",", &tmp);
name = strstr(name, tok);
if (!name) {
if (!name ||
(!next_tok && !perf_pmu__valid_suffix(name, tok))) {
res = false;
goto out;
}
if (!next_tok)
break;
tok = next_tok;
name += strlen(tok);
}
res = true;
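A rough standalone sketch of the comma-token walk above, leaving out the suffix check that the real loop now applies to the final token (see the sketch after perf_pmu__valid_suffix()); tokens_in_order() is a hypothetical helper, not perf code:

```c
#define _DEFAULT_SOURCE    /* for the strtok_r() prototype on glibc */
#include <stdio.h>
#include <string.h>

/* Each comma-separated token from the alias "Unit" string must appear, in order. */
static int tokens_in_order(const char *pmu_name, const char *alias_name)
{
    char buf[128];
    char *tmp, *tok;
    const char *name = pmu_name;

    snprintf(buf, sizeof(buf), "%s", alias_name);
    for (tok = strtok_r(buf, ",", &tmp); tok; tok = strtok_r(NULL, ",", &tmp)) {
        name = strstr(name, tok);
        if (!name)
            return 0;
        name += strlen(tok);
    }
    return 1;
}

int main(void)
{
    /* "hisi_sccl,ddrc" matches "hisi_sccl1_ddrc2": "hisi_sccl" first, then "ddrc". */
    printf("%d\n", tokens_in_order("hisi_sccl1_ddrc2", "hisi_sccl,ddrc"));
    /* "uncore_cbox_0" contains neither token, so it does not match. */
    printf("%d\n", tokens_in_order("uncore_cbox_0", "hisi_sccl,ddrc"));
    return 0;
}
```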
......@@ -771,8 +831,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
break;
}
if (pmu_is_uncore(name) &&
pmu_uncore_alias_match(pname, name))
if (pmu->is_uncore && pmu_uncore_alias_match(pname, name))
goto new_alias;
if (strcmp(pname, name))
......@@ -801,6 +860,83 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
pmu_add_cpu_aliases_map(head, pmu, map);
}
void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data)
{
int i = 0;
while (1) {
struct pmu_sys_events *event_table;
int j = 0;
event_table = &pmu_sys_event_tables[i++];
if (!event_table->table)
break;
while (1) {
struct pmu_event *pe = &event_table->table[j++];
int ret;
if (!pe->name && !pe->metric_group && !pe->metric_name)
break;
ret = fn(pe, data);
if (ret)
break;
}
}
}
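A hedged standalone illustration of the iteration shape used by pmu_for_each_sys_event(): an outer array terminated by a NULL table pointer, each table terminated by an empty entry, and a non-zero return from the callback ending only the current table. All struct and event names below are invented for the sketch.

```c
#include <stdio.h>

struct demo_event {
    const char *name;
};

struct demo_table {
    const struct demo_event *table;
};

static const struct demo_event arm64_sys_events[] = {
    { .name = "sys_ddr_pmu.write_cycles" },
    { .name = "sys_ccn_pmu.read_cycles" },
    { .name = NULL },                    /* per-table sentinel */
};

static const struct demo_table sys_tables[] = {
    { .table = arm64_sys_events },
    { .table = NULL },                   /* outer sentinel */
};

typedef int (*iter_fn)(const struct demo_event *e, void *data);

static void for_each_sys_event(iter_fn fn, void *data)
{
    for (const struct demo_table *t = sys_tables; t->table; t++) {
        for (const struct demo_event *e = t->table; e->name; e++) {
            if (fn(e, data))
                break;                   /* stop this table, move to the next */
        }
    }
}

static int print_event(const struct demo_event *e, void *data)
{
    (void)data;
    printf("%s\n", e->name);
    return 0;
}

int main(void)
{
    for_each_sys_event(print_event, NULL);
    return 0;
}
```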
struct pmu_sys_event_iter_data {
struct list_head *head;
struct perf_pmu *pmu;
};
static int pmu_add_sys_aliases_iter_fn(struct pmu_event *pe, void *data)
{
struct pmu_sys_event_iter_data *idata = data;
struct perf_pmu *pmu = idata->pmu;
if (!pe->name) {
if (pe->metric_group || pe->metric_name)
return 0;
return -EINVAL;
}
if (!pe->compat || !pe->pmu)
return 0;
if (!strcmp(pmu->id, pe->compat) &&
pmu_uncore_alias_match(pe->pmu, pmu->name)) {
__perf_pmu__new_alias(idata->head, NULL,
(char *)pe->name,
(char *)pe->desc,
(char *)pe->event,
(char *)pe->long_desc,
(char *)pe->topic,
(char *)pe->unit,
(char *)pe->perpkg,
(char *)pe->metric_expr,
(char *)pe->metric_name,
(char *)pe->deprecated);
}
return 0;
}
void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu)
{
struct pmu_sys_event_iter_data idata = {
.head = head,
.pmu = pmu,
};
if (!pmu->id)
return;
pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, &idata);
}
struct perf_event_attr * __weak
perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
......@@ -852,8 +988,11 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
......@@ -1721,3 +1860,14 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
return nr_caps;
}
int perf_pmu__match(char *pattern, char *name, char *tok)
{
if (fnmatch(pattern, name, 0))
return -1;
if (tok && !perf_pmu__valid_suffix(name, tok))
return -1;
return 0;
}
......@@ -30,6 +30,7 @@ struct perf_pmu_caps {
struct perf_pmu {
char *name;
char *id;
__u32 type;
bool selectable;
bool is_uncore;
......@@ -71,10 +72,12 @@ struct perf_pmu_alias {
bool deprecated;
char *metric_expr;
char *metric_name;
char *pmu_name;
};
struct perf_pmu *perf_pmu__find(const char *name);
struct perf_pmu *perf_pmu__find_by_type(unsigned int type);
void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms,
struct parse_events_error *error);
......@@ -117,8 +120,12 @@ struct pmu_events_map *pmu_events_map__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
typedef int (*pmu_sys_event_iter_fn)(struct pmu_event *pe, void *data);
void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
int perf_pmu__caps_parse(struct perf_pmu *pmu);
int perf_pmu__match(char *pattern, char *name, char *tok);
#endif /* __PMU_H */