未验证 提交 a95251b0 编写于 作者: O openeuler-ci-bot 提交者: Gitee

!298 Perf tool add Hip09 json support.

Merge Pull Request from: @hejunhao3 
 
Perf tool: add Hip09 JSON support.

```shell
perf list will look like this:
...
uncore ddrc:
  act_cnt                                           
       [count of DDRC active commands. Unit: hisi_sccl,ddrc]
  cycles                                            
       [Unit: hisi_sccl,ddrc]
  ddrc_cycles                                       
       [Count of DDRC cycles. Unit: hisi_sccl,ddrc]
  flux_rd                                           
       [DDRC read commands. Unit: hisi_sccl,ddrc]
  flux_wr                                           
       [DDRC write commands. Unit: hisi_sccl,ddrc]
  pre_act                                           
       [count of DDRC pre-active commands. Unit: hisi_sccl,ddrc]
  priority_cmd                                      
       [count of DMC commands with the highest priority. Unit: hisi_sccl,ddrc]
  read_cmd                                          
       [count of DDRC read commands. Unit: hisi_sccl,ddrc]
  read_cmd_occupancy                                
       [count of cycles occupied by the read command in the queue. Unit: hisi_sccl,ddrc]
  write_cmd                                         
       [count of DDRC write commands. Unit: hisi_sccl,ddrc]

uncore sllc:
  cycles                                            
       [Unit: hisi_sccl,sllc]
  rx_data                                           
       [Count of the data received by SLLC. Unit: hisi_sccl,sllc]
  rx_data_sum                                       
       [total cycles SLLC taken to receive data. Unit: hisi_sccl,sllc]
  rx_req                                            
       [Count of the requests received by SLLC. Unit: hisi_sccl,sllc]
  rx_req_sum                                        
       [total cycles SLLC taken to receive requests. Unit: hisi_sccl,sllc]
  sllc_cycles                                       
       [Count of SLLC cycles. Unit: hisi_sccl,sllc]
  tx_data                                           
       [Count of the data transmitted by SLLC. Unit: hisi_sccl,sllc]
  retry_cpu                                         
       [Count of the number of retry that L3C suppresses the CPU operations. Unit: hisi_sccl,l3c]
  retry_ring                                        
       [Count of the number of retry that L3C suppresses the ring operations. Unit: hisi_sccl,l3c]
...
``` 
 
Link: https://gitee.com/openeuler/kernel/pulls/298 
Reviewed-by: Ling Mingqiang <lingmingqiang@huawei.com> 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -6,10 +6,13 @@ pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
JDIR_TEST = pmu-events/arch/test
JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
find $(JDIR_TEST) -name '*.json')
#
# Locate/process JSON files in pmu-events/arch/
# directory and create tables in pmu-events.c.
#
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS)
$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_ddrc.flux_wr",
"ConfigCode": "0x00",
"EventName": "flux_wr",
"BriefDescription": "DDRC total write operations",
"PublicDescription": "DDRC total write operations",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_ddrc.flux_rd",
"ConfigCode": "0x01",
"EventName": "flux_rd",
"BriefDescription": "DDRC total read operations",
"PublicDescription": "DDRC total read operations",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_ddrc.flux_wcmd",
"ConfigCode": "0x02",
"EventName": "flux_wcmd",
"BriefDescription": "DDRC write commands",
"PublicDescription": "DDRC write commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_ddrc.flux_rcmd",
"ConfigCode": "0x03",
"EventName": "flux_rcmd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x04",
"EventName": "uncore_hisi_ddrc.pre_cmd",
"ConfigCode": "0x04",
"EventName": "pre_cmd",
"BriefDescription": "DDRC precharge commands",
"PublicDescription": "DDRC precharge commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x05",
"EventName": "uncore_hisi_ddrc.act_cmd",
"ConfigCode": "0x05",
"EventName": "act_cmd",
"BriefDescription": "DDRC active commands",
"PublicDescription": "DDRC active commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x06",
"EventName": "uncore_hisi_ddrc.rnk_chg",
"ConfigCode": "0x06",
"EventName": "rnk_chg",
"BriefDescription": "DDRC rank commands",
"PublicDescription": "DDRC rank commands",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x07",
"EventName": "uncore_hisi_ddrc.rw_chg",
"ConfigCode": "0x07",
"EventName": "rw_chg",
"BriefDescription": "DDRC read and write changes",
"PublicDescription": "DDRC read and write changes",
"Unit": "hisi_sccl,ddrc"
......
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_hha.rx_ops_num",
"ConfigCode": "0x00",
"EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"PublicDescription": "The number of all operations received by the HHA",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_hha.rx_outer",
"ConfigCode": "0x01",
"EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"PublicDescription": "The number of all operations received by the HHA from another socket",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_hha.rx_sccl",
"ConfigCode": "0x02",
"EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_hha.rx_ccix",
"ConfigCode": "0x03",
"EventName": "rx_ccix",
"BriefDescription": "Count of the number of operations that HHA has received from CCIX",
"PublicDescription": "Count of the number of operations that HHA has received from CCIX",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1c",
"EventName": "uncore_hisi_hha.rd_ddr_64b",
"ConfigCode": "0x4",
"EventName": "rx_wbi",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x5",
"EventName": "rx_wbip",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x11",
"EventName": "rx_wtistash",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x1c",
"EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1d",
"EventName": "uncore_hisi_hha.wr_ddr_64b",
"ConfigCode": "0x1d",
"EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1e",
"EventName": "uncore_hisi_hha.rd_ddr_128b",
"ConfigCode": "0x1e",
"EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1f",
"EventName": "uncore_hisi_hha.wr_ddr_128b",
"ConfigCode": "0x1f",
"EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x20",
"EventName": "uncore_hisi_hha.spill_num",
"ConfigCode": "0x20",
"EventName": "spill_num",
"BriefDescription": "Count of the number of spill operations that the HHA has sent",
"PublicDescription": "Count of the number of spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x21",
"EventName": "uncore_hisi_hha.spill_success",
"ConfigCode": "0x21",
"EventName": "spill_success",
"BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
"PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x23",
"EventName": "bi_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x32",
"EventName": "mediated_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x33",
"EventName": "tx_snp_num",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x34",
"EventName": "tx_snp_outer",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x35",
"EventName": "tx_snp_ccix",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x38",
"EventName": "rx_snprspdata",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x3c",
"EventName": "rx_snprsp_outer",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x40",
"EventName": "sdir-lookup",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x41",
"EventName": "edir-lookup",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x42",
"EventName": "sdir-hit",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x43",
"EventName": "edir-hit",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4c",
"EventName": "sdir-home-migrate",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4d",
"EventName": "edir-home-migrate",
"Unit": "hisi_sccl,hha"
}
]
[
{
"EventCode": "0x00",
"EventName": "uncore_hisi_l3c.rd_cpipe",
"ConfigCode": "0x00",
"EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"PublicDescription": "Total read accesses",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x01",
"EventName": "uncore_hisi_l3c.wr_cpipe",
"ConfigCode": "0x01",
"EventName": "wr_cpipe",
"BriefDescription": "Total write accesses",
"PublicDescription": "Total write accesses",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x02",
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
"ConfigCode": "0x02",
"EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x03",
"EventName": "uncore_hisi_l3c.wr_hit_cpipe",
"ConfigCode": "0x03",
"EventName": "wr_hit_cpipe",
"BriefDescription": "Total write hits",
"PublicDescription": "Total write hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x04",
"EventName": "uncore_hisi_l3c.victim_num",
"ConfigCode": "0x04",
"EventName": "victim_num",
"BriefDescription": "l3c precharge commands",
"PublicDescription": "l3c precharge commands",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x20",
"EventName": "uncore_hisi_l3c.rd_spipe",
"ConfigCode": "0x20",
"EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x21",
"EventName": "uncore_hisi_l3c.wr_spipe",
"ConfigCode": "0x21",
"EventName": "wr_spipe",
"BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x22",
"EventName": "uncore_hisi_l3c.rd_hit_spipe",
"ConfigCode": "0x22",
"EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x23",
"EventName": "uncore_hisi_l3c.wr_hit_spipe",
"ConfigCode": "0x23",
"EventName": "wr_hit_spipe",
"BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x29",
"EventName": "uncore_hisi_l3c.back_invalid",
"ConfigCode": "0x29",
"EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"PublicDescription": "Count of the number of L3C back invalid operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x40",
"EventName": "uncore_hisi_l3c.retry_cpu",
"ConfigCode": "0x40",
"EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x41",
"EventName": "uncore_hisi_l3c.retry_ring",
"ConfigCode": "0x41",
"EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x42",
"EventName": "uncore_hisi_l3c.prefetch_drop",
"ConfigCode": "0x42",
"EventName": "prefetch_drop",
"BriefDescription": "Count of the number of prefetch drops from this L3C",
"PublicDescription": "Count of the number of prefetch drops from this L3C",
"Unit": "hisi_sccl,l3c"
......
[
{
"EventCode": "0x00",
"EventName": "ddrc_cycles",
"BriefDescription": "Count of DDRC cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x01",
"EventName": "act_cnt",
"BriefDescription": "count of DDRC active commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x07",
"EventName": "priority_cmd",
"BriefDescription": "count of DMC commands with the highest priority",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x0b",
"EventName": "pre_act",
"BriefDescription": "count of DDRC pre-active commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x41",
"EventName": "read_cmd",
"BriefDescription": "count of DDRC read commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x44",
"EventName": "write_cmd",
"BriefDescription": "count of DDRC write commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x80",
"EventName": "read_cmd_occupancy",
"BriefDescription": "count of cycles occupied by the read command in the queue",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x81",
"EventName": "write_cmd_occupancy",
"BriefDescription": "count of cycles occupied by the write command in the queue",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x83",
"EventName": "flux_wr",
"BriefDescription": "DDRC write commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"EventCode": "0x84",
"EventName": "flux_rd",
"BriefDescription": "DDRC read commands",
"Compat": "0x00000030",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "flux_wr * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC memory write(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw_write",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "flux_rd * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC memory read(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw_read",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "(flux_wr + flux_rd) * 32 / duration_time",
"BriefDescription": "Average bandwidth of DDRC (including memory read and write)(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_bw",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "read_cmd_occupancy / read_cmd",
"BriefDescription": "Average delay of DDRC read command scheduling",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_read_lat",
"Unit": "hisi_sccl,ddrc"
},
{
"MetricExpr": "write_cmd_occupancy / write_cmd",
"BriefDescription": "Average delay of DDRC write command scheduling",
"Compat": "0x00000030",
"MetricGroup": "DDRC",
"MetricName": "ddrc_write_lat",
"Unit": "hisi_sccl,ddrc"
}
]
\ No newline at end of file
[
{
"EventCode": "0x00",
"EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x01",
"EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x02",
"EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1c",
"EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1d",
"EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1e",
"EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x1f",
"EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x2e",
"EventName": "hha_retry",
"BriefDescription": "Count of the HHA retry",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventCode": "0x55",
"EventName": "hha_cycles",
"BriefDescription": "Count of the HHA cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "(rd_ddr_64b * 64 + rd_ddr_128b *128) / duration_time",
"BriefDescription": "Average bandwidth of reading DDRC(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "hha_read_ddrc_bw",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "(wr_ddr_64b * 64 + wr_ddr_128b *128) / duration_time",
"BriefDescription": "Average bandwidth of writing DDRC(Byte/s)",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "hha_write_ddrc_bw",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "rx_outer / rx_ops_num",
"BriefDescription": "Rate of cross-chip operations received by HHA",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "cross_chip_ops_rate",
"Unit": "hisi_sccl,hha"
},
{
"MetricExpr": "rx_sccl / rx_ops_num",
"BriefDescription": "Rate of cross-die operations received by HHA",
"Compat": "0x00000030",
"MetricGroup": "HHA",
"MetricName": "cross_die_ops_rate",
"Unit": "hisi_sccl,hha"
}
]
\ No newline at end of file
[
{
"EventCode": "0x00",
"EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x02",
"EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x20",
"EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x22",
"EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x29",
"EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x40",
"EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x41",
"EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x48",
"EventName": "l3c_hit",
"BriefDescription": "Count of the total number of L3C hit",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x80",
"EventName": "read_sum",
"Filter": "tt_core=0xff",
"BriefDescription": "total time taken by CPU to read L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xb8",
"EventName": "l3c_ref",
"BriefDescription": "Count of the total number of CPU accessed L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xb9",
"EventName": "dat_access",
"BriefDescription": "Count of the total number of CPU accessed L3C",
"Filter": "tt_core=0xff",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xc0",
"EventName": "read_alloc",
"Filter": "tt_core=0xff",
"BriefDescription": "Count of the requests issued by CPU received by L3C",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0xce",
"EventName": "retry_alloc",
"Filter": "tt_core=0xff",
"BriefDescription": "Count of the L3C retry CPU",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x7f",
"EventName": "l3c_cycles",
"BriefDescription": "Count of L3C cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,l3c"
},
{
"MetricExpr": "l3c_hit / l3c_ref",
"BriefDescription": "hit rate of L3C",
"Compat": "0x00000030",
"MetricGroup": "L3Cache",
"MetricName": "l3c_hit_rate",
"Unit": "hisi_sccl,l3c"
},
{
"MetricExpr": "read_sum / (l3c_ref - retry_alloc)",
"BriefDescription": "Average latency of CPU reading L3",
"Compat": "0x00000030",
"MetricGroup": "L3Cache",
"MetricName": "l3c_latency",
"Unit": "hisi_sccl,l3c"
}
]
\ No newline at end of file
[
{
"EventCode": "0x78",
"EventName": "pa_cycles",
"BriefDescription": "Count of PA cycles",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventName": "cycle",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x40",
"EventName": "rx_req",
"BriefDescription": "Count of the requests received by PA link0",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x41",
"EventName": "rx_req_link1",
"BriefDescription": "Count of the requests received by PA link1",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x42",
"EventName": "rx_req_link2",
"BriefDescription": "Count of the requests received by PA link2",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x43",
"EventName": "rx_req_link3",
"BriefDescription": "Count of the requests received by PA link3",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5c",
"EventName": "tx_req",
"BriefDescription": "Count of the requests transmitted by PA link0",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5d",
"EventName": "tx_req_link1",
"BriefDescription": "Count of the requests transmitted by PA link1",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5e",
"EventName": "tx_req_link2",
"BriefDescription": "Count of the requests transmitted by PA link2",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"EventCode": "0x5f",
"EventName": "tx_req_link3",
"BriefDescription": "Count of the requests transmitted by PA link3",
"Compat": "0x00000030",
"Unit": "hisi_sicl,pa"
},
{
"MetricExpr": "(rx_req + rx_req_link1 + rx_req_link2 + rx_req_link3) / duration_time",
"BriefDescription": "Average bandwidth of PA receive requests",
"Compat": "0x00000030",
"MetricGroup": "PA",
"MetricName": "pa_rx_req_bw",
"Unit": "hisi_sicl,pa"
},
{
"MetricExpr": "(tx_req + tx_req_link1 + tx_req_link2 + tx_req_link3) / duration_time",
"BriefDescription": "Average bandwidth of PA transmitted requests",
"Compat": "0x00000030",
"MetricGroup": "PA",
"MetricName": "pa_tx_req_bw",
"Unit": "hisi_sicl,pa"
}
]
\ No newline at end of file
[
{
"EventCode": "0x09",
"EventName": "sllc_cycles",
"BriefDescription": "Count of SLLC cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventName": "cycles",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x20",
"EventName": "rx_req_sum",
"BriefDescription": "total cycles SLLC taken to receive requests",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x21",
"EventName": "rx_data_sum",
"BriefDescription": "total cycles SLLC taken to receive data",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x24",
"EventName": "tx_req_sum",
"BriefDescription": "total cycles SLLC taken to transmit requests",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x25",
"EventName": "tx_data_sum",
"BriefDescription": "total cycles SLLC taken to transmit data",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x30",
"EventName": "rx_req",
"BriefDescription": "Count of the requests received by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x31",
"EventName": "rx_data",
"BriefDescription": "Count of the data received by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x34",
"EventName": "tx_req",
"BriefDescription": "Count of the requests transmitted by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"EventCode": "0x35",
"EventName": "tx_data",
"BriefDescription": "Count of the data transmitted by SLLC",
"Compat": "0x00000030",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_req_sum / rx_req",
"BriefDescription": "Average latency of SLLC receive requests(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_req_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_data_sum / rx_data",
"BriefDescription": "Average latency of SLLC receive data(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_data_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_req_sum / tx_req",
"BriefDescription": "Average latency of SLLC transmit requests(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_req_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_data_sum / tx_data",
"BriefDescription": "Average latency of SLLC transmit data(cycles)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_data_lat",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_req * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC receive requests(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_req_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "rx_data * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC receive data(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_rx_data_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_req * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC transmit requests(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_req_bw",
"Unit": "hisi_sccl,sllc"
},
{
"MetricExpr": "tx_data * 332 / duration_time",
"BriefDescription": "Average bandwidth of SLLC transmit data(bits/s)",
"Compat": "0x00000030",
"MetricGroup": "SLLC",
"MetricName": "sllc_tx_data_bw",
"Unit": "hisi_sccl,sllc"
}
]
\ No newline at end of file
[
{
"PublicDescription": "Attributable Level 3 cache access, read",
"EventCode": "0x40",
"EventName": "L3_CACHE_RD",
"BriefDescription": "L3 cache access, read"
}
]
[
{
"ArchStdEvent": "L3_CACHE_RD"
}
]
\ No newline at end of file
......@@ -17,5 +17,26 @@
"CounterMask": "0",
"Invert": "0",
"EdgeDetect": "0"
},
{
"EventCode": "0x7",
"EventName": "uncore_hisi_l3c.rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
"EventCode": "0x12",
"EventName": "uncore_imc_free_running.cache_miss",
"BriefDescription": "Total cache misses",
"PublicDescription": "Total cache misses",
"Unit": "imc_free_running"
},
{
"EventCode": "0x34",
"EventName": "uncore_imc.cache_hits",
"BriefDescription": "Total cache hits",
"PublicDescription": "Total cache hits",
"Unit": "imc"
}
]
[
{
"BriefDescription": "ddr write-cycles event",
"EventCode": "0x2b",
"EventName": "sys_ddr_pmu.write_cycles",
"Unit": "sys_ddr_pmu",
"Compat": "v8"
},
{
"EventCode": "0x84",
"EventName": "sys_hisi_ddrc.flux_rd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "sys_hisi_ddrc",
"Compat": "hip09"
}
]
......@@ -55,6 +55,7 @@ char *prog;
struct json_event {
char *name;
char *compat;
char *event;
char *desc;
char *long_desc;
......@@ -82,6 +83,23 @@ enum aggr_mode_class convert(const char *aggr_mode)
typedef int (*func)(void *data, struct json_event *je);
static LIST_HEAD(sys_event_tables);
/*
 * One node per system ("sys") PMU event table discovered while walking the
 * pmu-events/arch/ tree; nodes live on the global sys_event_tables list and
 * are released by free_sys_event_tables().
 */
struct sys_event_table {
struct list_head list; /* membership in the sys_event_tables list */
char *soc_id; /* heap-allocated SoC identifier; also used as the generated table symbol */
};
/*
 * Release every entry accumulated on the global sys_event_tables list.
 * Each node owns its soc_id string, so that is freed before the node
 * itself.  The _safe iterator variant is required because nodes are
 * freed while the list is being traversed.
 */
static void free_sys_event_tables(void)
{
struct sys_event_table *et, *next;
list_for_each_entry_safe(et, next, &sys_event_tables, list) {
free(et->soc_id);
free(et);
}
}
int eprintf(int level, int var, const char *fmt, ...)
{
......@@ -263,6 +281,7 @@ static struct map {
{ "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
{ "hisi_sccl,hha", "hisi_sccl,hha" },
{ "hisi_sccl,l3c", "hisi_sccl,l3c" },
{ "hisi_sccl,sllc", "hisi_sccl,sllc" },
{ "L3PMC", "amd_l3" },
{ "DFPMC", "amd_df" },
{}
......@@ -360,6 +379,8 @@ static int print_events_table_entry(void *data, struct json_event *je)
if (je->event)
fprintf(outfp, "\t.event = \"%s\",\n", je->event);
fprintf(outfp, "\t.desc = \"%s\",\n", je->desc);
if (je->compat)
fprintf(outfp, "\t.compat = \"%s\",\n", je->compat);
fprintf(outfp, "\t.topic = \"%s\",\n", topic);
if (je->long_desc && je->long_desc[0])
fprintf(outfp, "\t.long_desc = \"%s\",\n", je->long_desc);
......@@ -390,6 +411,7 @@ struct event_struct {
struct list_head list;
char *name;
char *event;
char *compat;
char *desc;
char *long_desc;
char *pmu;
......@@ -550,10 +572,12 @@ static int json_events(const char *fn,
struct json_event je = {};
char *arch_std = NULL;
unsigned long long eventcode = 0;
unsigned long long configcode = 0;
struct msrmap *msr = NULL;
jsmntok_t *msrval = NULL;
jsmntok_t *precise = NULL;
jsmntok_t *obj = tok++;
bool configcode_present = false;
EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
for (j = 0; j < obj->size; j += 2) {
......@@ -576,6 +600,12 @@ static int json_events(const char *fn,
addfield(map, &code, "", "", val);
eventcode |= strtoul(code, NULL, 0);
free(code);
} else if (json_streq(map, field, "ConfigCode")) {
char *code = NULL;
addfield(map, &code, "", "", val);
configcode |= strtoul(code, NULL, 0);
free(code);
configcode_present = true;
} else if (json_streq(map, field, "ExtSel")) {
char *code = NULL;
addfield(map, &code, "", "", val);
......@@ -583,6 +613,8 @@ static int json_events(const char *fn,
free(code);
} else if (json_streq(map, field, "EventName")) {
addfield(map, &je.name, "", "", val);
} else if (json_streq(map, field, "Compat")) {
addfield(map, &je.compat, "", "", val);
} else if (json_streq(map, field, "BriefDescription")) {
addfield(map, &je.desc, "", "", val);
fixdesc(je.desc);
......@@ -655,6 +687,9 @@ static int json_events(const char *fn,
addfield(map, &extra_desc, " ",
"(Precise event)", NULL);
}
if (configcode_present)
snprintf(buf, sizeof buf, "config=%#llx", configcode);
else
snprintf(buf, sizeof buf, "event=%#llx", eventcode);
addfield(map, &event, ",", buf, NULL);
if (je.desc && extra_desc)
......@@ -683,6 +718,7 @@ static int json_events(const char *fn,
free(event);
free(je.desc);
free(je.name);
free(je.compat);
free(je.long_desc);
free(extra_desc);
free(je.pmu);
......@@ -747,6 +783,15 @@ static char *file_name_to_table_name(char *fname)
return tblname;
}
/*
 * is_sys_dir - report whether a directory path names a "sys" events
 * directory, i.e. whether it ends with the literal suffix "/sys".
 * Returns false for paths shorter than the suffix.
 */
static bool is_sys_dir(char *fname)
{
	const char *suffix = "/sys";
	size_t flen = strlen(fname);
	size_t slen = strlen(suffix);

	/* A path shorter than the suffix cannot possibly end with it. */
	if (flen < slen)
		return false;

	/* Compare only the tail of the path against "/sys". */
	return strcmp(fname + (flen - slen), suffix) == 0;
}
static void print_mapping_table_prefix(FILE *outfp)
{
fprintf(outfp, "struct pmu_events_map pmu_events_map[] = {\n");
......@@ -777,10 +822,38 @@ static void print_mapping_test_table(FILE *outfp)
fprintf(outfp, "\t.cpuid = \"testcpu\",\n");
fprintf(outfp, "\t.version = \"v1\",\n");
fprintf(outfp, "\t.type = \"core\",\n");
fprintf(outfp, "\t.table = pme_test_cpu,\n");
fprintf(outfp, "\t.table = pme_test_soc_cpu,\n");
fprintf(outfp, "},\n");
}
static void print_system_event_mapping_table_prefix(FILE *outfp)
{
	/* Open the generated array of per-SoC ("system") event tables. */
	fputs("\nstruct pmu_sys_events pmu_sys_event_tables[] = {", outfp);
}
static void print_system_event_mapping_table_suffix(FILE *outfp)
{
	/* Terminate the array with an all-zero sentinel entry and close it. */
	fputs("\n\t{\n\t\t.table = 0\n\t},", outfp);
	fputs("\n};\n", outfp);
}
/* Emit one pmu_sys_events entry per discovered ".../sys" JSON table. */
static int process_system_event_tables(FILE *outfp)
{
	struct sys_event_table *tbl;

	print_system_event_mapping_table_prefix(outfp);

	/* Each entry references the generated table symbol by its SoC id. */
	list_for_each_entry(tbl, &sys_event_tables, list) {
		fprintf(outfp, "\n\t{\n\t\t.table = %s,\n\t\t.name = \"%s\",\n\t},",
			tbl->soc_id, tbl->soc_id);
	}

	print_system_event_mapping_table_suffix(outfp);

	return 0;
}
static int process_mapfile(FILE *outfp, char *fpath)
{
int n = 16384;
......@@ -886,6 +959,8 @@ static void create_empty_mapping(const char *output_file)
fprintf(outfp, "#include \"pmu-events/pmu-events.h\"\n");
print_mapping_table_prefix(outfp);
print_mapping_table_suffix(outfp);
print_system_event_mapping_table_prefix(outfp);
print_system_event_mapping_table_suffix(outfp);
fclose(outfp);
}
......@@ -978,15 +1053,20 @@ static int process_one_file(const char *fpath, const struct stat *sb,
int level = ftwbuf->level;
int err = 0;
if (level == 2 && is_dir) {
if (level >= 2 && is_dir) {
int count = 0;
/*
* For level 2 directory, bname will include parent name,
* like vendor/platform. So search back from platform dir
* to find this.
* Something similar for level 3 directory, but we're a PMU
* category folder, like vendor/platform/cpu.
*/
bname = (char *) fpath + ftwbuf->base - 2;
for (;;) {
if (*bname == '/')
count++;
if (count == level - 1)
break;
bname--;
}
......@@ -999,13 +1079,13 @@ static int process_one_file(const char *fpath, const struct stat *sb,
level, sb->st_size, bname, fpath);
/* base dir or too deep */
if (level == 0 || level > 3)
if (level == 0 || level > 4)
return 0;
/* model directory, reset topic */
if ((level == 1 && is_dir && is_leaf_dir(fpath)) ||
(level == 2 && is_dir)) {
(level >= 2 && is_dir && is_leaf_dir(fpath))) {
if (close_table)
print_events_table_suffix(eventsfp);
......@@ -1021,6 +1101,22 @@ static int process_one_file(const char *fpath, const struct stat *sb,
return -1;
}
if (is_sys_dir(bname)) {
struct sys_event_table *sys_event_table;
sys_event_table = malloc(sizeof(*sys_event_table));
if (!sys_event_table)
return -1;
sys_event_table->soc_id = strdup(tblname);
if (!sys_event_table->soc_id) {
free(sys_event_table);
return -1;
}
list_add_tail(&sys_event_table->list,
&sys_event_tables);
}
print_events_table_prefix(eventsfp, tblname);
return 0;
}
......@@ -1162,6 +1258,10 @@ int main(int argc, char *argv[])
sprintf(ldirname, "%s/test", start_dirname);
rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
if (rc)
goto err_processing_std_arch_event_dir;
rc = nftw(ldirname, process_one_file, maxfds, 0);
if (rc)
goto err_processing_dir;
......@@ -1176,10 +1276,16 @@ int main(int argc, char *argv[])
}
rc = process_mapfile(eventsfp, mapfile);
fclose(eventsfp);
if (rc) {
pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
/* Make build fail */
ret = 1;
goto err_close_eventsfp;
}
rc = process_system_event_tables(eventsfp);
fclose(eventsfp);
if (rc) {
ret = 1;
goto err_out;
}
......
......@@ -12,6 +12,7 @@ enum aggr_mode_class {
*/
struct pmu_event {
const char *name;
const char *compat;
const char *event;
const char *desc;
const char *topic;
......@@ -43,10 +44,16 @@ struct pmu_events_map {
struct pmu_event *table;
};
struct pmu_sys_events {
const char *name;
struct pmu_event *table;
};
/*
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
extern struct pmu_events_map pmu_events_map[];
extern struct pmu_sys_events pmu_sys_event_tables[];
#endif
......@@ -605,7 +605,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 10);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
/* config1 */
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -614,7 +614,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 1);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config1"));
/* config2=3 */
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -623,7 +623,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 3);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config2"));
/* umask=1*/
term = list_entry(term->list.next, struct parse_events_term, list);
......@@ -661,7 +661,7 @@ static int test__checkterms_simple(struct list_head *terms)
TEST_ASSERT_VAL("wrong type val",
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
TEST_ASSERT_VAL("wrong config", !term->config);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
return 0;
}
......
此差异已折叠。
......@@ -180,11 +180,22 @@ void evlist__remove(struct evlist *evlist, struct evsel *evsel)
void perf_evlist__splice_list_tail(struct evlist *evlist,
struct list_head *list)
{
struct evsel *evsel, *temp;
while (!list_empty(list)) {
struct evsel *evsel, *temp, *leader = NULL;
__evlist__for_each_entry_safe(list, temp, evsel) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
leader = evsel;
break;
}
__evlist__for_each_entry_safe(list, temp, evsel) {
if (evsel->leader == leader) {
list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
}
}
}
}
......
......@@ -279,7 +279,9 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
* when then group is left.
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader)
ev->leader != metric_events[i]->leader &&
!strcmp(ev->leader->pmu_name,
metric_events[i]->leader->pmu_name))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
......@@ -413,6 +415,12 @@ static bool match_metric(const char *n, const char *list)
return false;
}
/* An event matches when either its metric group list or its name does. */
static bool match_pe_metric(struct pmu_event *pe, const char *metric)
{
	if (match_metric(pe->metric_group, metric))
		return true;

	return match_metric(pe->metric_name, metric);
}
struct mep {
struct rb_node nd;
const char *name;
......@@ -491,49 +499,29 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
putchar('\n');
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details)
static int metricgroup__print_pmu_event(struct pmu_event *pe,
bool metricgroups, char *filter,
bool raw, bool details,
struct rblist *groups,
struct strlist *metriclist)
{
struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
struct rblist groups;
struct rb_node *node, *next;
struct strlist *metriclist = NULL;
if (!map)
return;
if (!metricgroups) {
metriclist = strlist__new(NULL, NULL);
if (!metriclist)
return;
}
rblist__init(&groups);
groups.node_new = mep_new;
groups.node_cmp = mep_cmp;
groups.node_delete = mep_delete;
for (i = 0; ; i++) {
const char *g;
pe = &map->table[i];
char *omg, *mg;
if (!pe->name && !pe->metric_group && !pe->metric_name)
break;
if (!pe->metric_expr)
continue;
g = pe->metric_group;
if (!g && pe->metric_name) {
if (pe->name)
continue;
return 0;
g = "No_group";
}
if (g) {
char *omg;
char *mg = strdup(g);
if (!g)
return 0;
mg = strdup(g);
if (!mg)
return;
return -ENOMEM;
omg = mg;
while ((g = strsep(&mg, ";")) != NULL) {
struct mep *me;
......@@ -549,12 +537,11 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else {
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
return -1;
if (details) {
if (asprintf(&s, "%s\n%*s%s]",
s, 8, "[", pe->metric_expr) < 0)
return;
return -1;
}
}
......@@ -564,7 +551,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (!metricgroups) {
strlist__add(metriclist, s);
} else {
me = mep_lookup(&groups, g);
me = mep_lookup(groups, g);
if (!me)
continue;
strlist__add(me->metrics, s);
......@@ -574,7 +561,100 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
free(s);
}
free(omg);
return 0;
}
/* Print arguments threaded through metricgroup__print_sys_event_iter(). */
struct metricgroup_print_sys_idata {
	struct strlist *metriclist;	/* flat output list when not grouping */
	char *filter;
	struct rblist *groups;		/* group-name -> metrics mapping */
	bool metricgroups;
	bool raw;
	bool details;
};

/* Callback invoked for each system (SoC-scoped) PMU event. */
typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);

/* Pairs the per-event callback with its opaque payload for iteration. */
struct metricgroup_iter_data {
	metricgroup_sys_event_iter_fn fn;
	void *data;
};
/*
 * Forward a system PMU event to d->fn, but only if some online PMU's
 * sysfs identifier matches the event's "Compat" string.
 */
static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	/* Only metric entries tied to a specific SoC are of interest. */
	if (!pe->metric_expr || !pe->compat)
		return 0;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->id && !strcmp(pmu->id, pe->compat))
			return d->fn(pe, d->data);
	}

	return 0;
}
/* Unpack the bundled print arguments and forward to the common printer. */
static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_print_sys_idata *idata = data;

	return metricgroup__print_pmu_event(pe, idata->metricgroups,
					    idata->filter, idata->raw,
					    idata->details, idata->groups,
					    idata->metriclist);
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details)
{
struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
struct rblist groups;
struct rb_node *node, *next;
struct strlist *metriclist = NULL;
if (!metricgroups) {
metriclist = strlist__new(NULL, NULL);
if (!metriclist)
return;
}
rblist__init(&groups);
groups.node_new = mep_new;
groups.node_cmp = mep_cmp;
groups.node_delete = mep_delete;
for (i = 0; map; i++) {
pe = &map->table[i];
if (!pe->name && !pe->metric_group && !pe->metric_name)
break;
if (!pe->metric_expr)
continue;
if (metricgroup__print_pmu_event(pe, metricgroups, filter,
raw, details, &groups,
metriclist) < 0)
return;
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__print_sys_event_iter,
.data = (void *) &(struct metricgroup_print_sys_idata){
.metriclist = metriclist,
.metricgroups = metricgroups,
.filter = filter,
.raw = raw,
.details = details,
.groups = &groups,
},
};
pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
}
if (!filter || !rblist__empty(&groups)) {
......@@ -683,6 +763,16 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
return 1;
}
/*
 * State threaded through metricgroup__add_metric_sys_event_iter() while
 * walking the system-wide PMU event tables for one requested metric.
 */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;	/* accumulates matched metrics */
	const char *metric;		/* metric (or group) name to match */
	struct metric **m;
	struct expr_ids *ids;
	int *ret;			/* caller's running return status */
	bool *has_match;		/* set once any event matched */
	bool metric_no_group;
};
static int __add_metric(struct list_head *metric_list,
struct pmu_event *pe,
bool metric_no_group,
......@@ -793,6 +883,7 @@ static int __add_metric(struct list_head *metric_list,
}
#define map_for_each_event(__pe, __idx, __map) \
if (__map) \
for (__idx = 0, __pe = &__map->table[__idx]; \
__pe->name || __pe->metric_group || __pe->metric_name; \
__pe = &__map->table[++__idx])
......@@ -964,6 +1055,29 @@ static int add_metric(struct list_head *metric_list,
return ret;
}
/*
 * pmu_for_each_sys_event() callback: if @pe matches the requested metric
 * (by group or by name), add it to the metric list and resolve metrics it
 * references.  Returns non-zero to stop the iteration on error.
 */
static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	if (!match_pe_metric(pe, d->metric))
		return 0;

	ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
	if (ret)
		return ret;

	ret = resolve_metric(d->metric_no_group,
			     d->metric_list, NULL, d->ids);
	if (ret)
		return ret;

	*(d->has_match) = true;

	/*
	 * NOTE(review): returns the caller's saved *d->ret instead of 0 —
	 * presumably always 0 when reached; confirm this is intentional.
	 */
	return *d->ret;
}
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
struct strbuf *events,
struct list_head *metric_list,
......@@ -994,6 +1108,22 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
goto out;
}
{
struct metricgroup_iter_data data = {
.fn = metricgroup__add_metric_sys_event_iter,
.data = (void *) &(struct metricgroup_add_iter_data) {
.metric_list = &list,
.metric = metric,
.metric_no_group = metric_no_group,
.m = &m,
.ids = &ids,
.has_match = &has_match,
.ret = &ret,
},
};
pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
}
/* End of pmu events. */
if (!has_match) {
ret = -EINVAL;
......@@ -1120,9 +1250,6 @@ int metricgroup__parse_groups(const struct option *opt,
struct evlist *perf_evlist = *(struct evlist **)opt->value;
struct pmu_events_map *map = pmu_events_map__find();
if (!map)
return 0;
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, map);
}
......
......@@ -2967,7 +2967,7 @@ int parse_events_term__num(struct parse_events_term **term,
struct parse_events_term temp = {
.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
.type_term = type_term,
.config = config,
.config = config ? : strdup(config_term_names[type_term]),
.no_value = no_value,
.err_term = loc_term ? loc_term->first_column : 0,
.err_val = loc_val ? loc_val->first_column : 0,
......
......@@ -312,7 +312,7 @@ PE_NAME opt_pmu_config
if (!strncmp(name, "uncore_", 7) &&
strncmp($1, "uncore_", 7))
name += 7;
if (!fnmatch(pattern, name, 0)) {
if (!perf_pmu__match(pattern, name, $1)) {
if (parse_events_copy_term_list(orig_terms, &terms))
CLEANUP_YYABORT;
if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
......
......@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/ctype.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <errno.h>
......@@ -17,6 +18,7 @@
#include <locale.h>
#include <regex.h>
#include <perf/cpumap.h>
#include <fnmatch.h>
#include "debug.h"
#include "evsel.h"
#include "pmu.h"
......@@ -283,6 +285,7 @@ void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->str);
zfree(&newalias->metric_expr);
zfree(&newalias->metric_name);
zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
}
......@@ -297,6 +300,10 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
list_for_each_entry(a, alist, list) {
if (!strcasecmp(newalias->name, a->name)) {
if (newalias->pmu_name && a->pmu_name &&
!strcasecmp(newalias->pmu_name, a->pmu_name)) {
continue;
}
perf_pmu_update_alias(a, newalias);
perf_pmu_free_alias(newalias);
return true;
......@@ -382,6 +389,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
}
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
alias->str = strdup(newval);
alias->pmu_name = metric_name ? strdup(metric_name) : NULL;
if (deprecated)
alias->deprecated = true;
......@@ -597,6 +605,7 @@ static struct perf_cpu_map *__pmu_cpumask(const char *path)
* Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
* may have a "cpus" file.
*/
#define SYS_TEMPLATE_ID "./bus/event_source/devices/%s/identifier"
#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
......@@ -635,6 +644,21 @@ static bool pmu_is_uncore(const char *name)
return file_available(path);
}
/*
 * Read the sysfs "identifier" file of PMU @name (e.g. a SoC/IP version
 * string used to match JSON "Compat" entries).
 *
 * Returns a newly allocated string the caller must free, or NULL if the
 * PMU exposes no identifier.
 */
static char *pmu_id(const char *name)
{
	char path[PATH_MAX], *str;
	size_t len;

	snprintf(path, PATH_MAX, SYS_TEMPLATE_ID, name);

	if (sysfs__read_str(path, &str, &len) < 0)
		return NULL;

	/*
	 * Strip the trailing line feed.  Guard against a zero-length read:
	 * the original unconditional str[len - 1] write would underflow the
	 * buffer for an empty identifier file.
	 */
	if (len)
		str[len - 1] = 0;
	return str;
}
/*
* PMU CORE devices have different name other than cpu in sysfs on some
* platforms.
......@@ -701,6 +725,35 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
/*
 * Decide whether @pmu_name is a valid instance of @tok: either @tok
 * itself, or @tok followed by an optional '_' and then only digits and
 * underscores (e.g. "sccl", "sccl3" and "sccl_3" all match tok "sccl").
 */
static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok)
{
	size_t toklen = strlen(tok);
	const char *cursor;

	if (strncmp(pmu_name, tok, toklen))
		return false;

	cursor = pmu_name + toklen;
	if (*cursor == '\0')
		return true;

	/* A single separating underscore may precede the numeric suffix. */
	if (*cursor == '_')
		cursor++;

	/* The remainder must be non-empty digits/underscores only. */
	do {
		if (!isdigit(*cursor) && *cursor != '_')
			return false;
	} while (*++cursor != '\0');

	return true;
}
struct pmu_events_map *__weak pmu_events_map__find(void)
{
return perf_pmu__find_map(NULL);
......@@ -732,12 +785,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
* match "socket" in "socketX_pmunameY" and then "pmuname" in
* "pmunameY".
*/
for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) {
while (1) {
char *next_tok = strtok_r(NULL, ",", &tmp);
name = strstr(name, tok);
if (!name) {
if (!name ||
(!next_tok && !perf_pmu__valid_suffix(name, tok))) {
res = false;
goto out;
}
if (!next_tok)
break;
tok = next_tok;
name += strlen(tok);
}
res = true;
......@@ -771,8 +831,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
break;
}
if (pmu_is_uncore(name) &&
pmu_uncore_alias_match(pname, name))
if (pmu->is_uncore && pmu_uncore_alias_match(pname, name))
goto new_alias;
if (strcmp(pname, name))
......@@ -801,6 +860,83 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
pmu_add_cpu_aliases_map(head, pmu, map);
}
/*
 * Invoke @fn(@pe, @data) for every event in every system (SoC-scoped)
 * event table.  A non-zero return from @fn stops iteration of the current
 * table only; the walk then continues with the next table.
 */
void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data)
{
	struct pmu_sys_events *event_table;

	/* The table array is terminated by an entry with a NULL .table. */
	for (event_table = pmu_sys_event_tables; event_table->table;
	     event_table++) {
		struct pmu_event *pe;

		/* An entry with no name/metric fields terminates the table. */
		for (pe = event_table->table;
		     pe->name || pe->metric_group || pe->metric_name; pe++) {
			if (fn(pe, data))
				break;
		}
	}
}
/* Carries the alias list and owning PMU into pmu_add_sys_aliases_iter_fn(). */
struct pmu_sys_event_iter_data {
	struct list_head *head;		/* destination alias list */
	struct perf_pmu *pmu;		/* PMU the aliases are built for */
};
/*
 * pmu_for_each_sys_event() callback: create an alias on idata->head for
 * every system event whose "Compat" string equals this PMU's sysfs
 * identifier and whose Unit ("pmu") pattern matches the PMU name.
 */
static int pmu_add_sys_aliases_iter_fn(struct pmu_event *pe, void *data)
{
	struct pmu_sys_event_iter_data *idata = data;
	struct perf_pmu *pmu = idata->pmu;

	if (!pe->name) {
		/* Metric-only entries are skipped; anything else is malformed. */
		if (pe->metric_group || pe->metric_name)
			return 0;
		return -EINVAL;
	}

	/* Events lacking a SoC id or a PMU unit cannot be matched. */
	if (!pe->compat || !pe->pmu)
		return 0;

	if (!strcmp(pmu->id, pe->compat) &&
	    pmu_uncore_alias_match(pe->pmu, pmu->name)) {
		__perf_pmu__new_alias(idata->head, NULL,
				      (char *)pe->name,
				      (char *)pe->desc,
				      (char *)pe->event,
				      (char *)pe->long_desc,
				      (char *)pe->topic,
				      (char *)pe->unit,
				      (char *)pe->perpkg,
				      (char *)pe->metric_expr,
				      (char *)pe->metric_name,
				      (char *)pe->deprecated);
	}

	return 0;
}
/* Add aliases for all system events applicable to @pmu onto @head. */
void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu)
{
	struct pmu_sys_event_iter_data idata;

	/* Only PMUs exposing a sysfs identifier can match "Compat" events. */
	if (!pmu->id)
		return;

	idata.head = head;
	idata.pmu = pmu;
	pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, &idata);
}
struct perf_event_attr * __weak
perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
......@@ -852,8 +988,11 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
......@@ -1721,3 +1860,14 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
return nr_caps;
}
/*
 * Match @name against the glob @pattern; when @tok is given, additionally
 * require @name to be @tok with a valid instance suffix.
 * Returns 0 on match, -1 otherwise.
 */
int perf_pmu__match(char *pattern, char *name, char *tok)
{
	bool glob_ok = fnmatch(pattern, name, 0) == 0;
	bool suffix_ok = !tok || perf_pmu__valid_suffix(name, tok);

	return (glob_ok && suffix_ok) ? 0 : -1;
}
......@@ -30,6 +30,7 @@ struct perf_pmu_caps {
struct perf_pmu {
char *name;
char *id;
__u32 type;
bool selectable;
bool is_uncore;
......@@ -71,10 +72,12 @@ struct perf_pmu_alias {
bool deprecated;
char *metric_expr;
char *metric_name;
char *pmu_name;
};
struct perf_pmu *perf_pmu__find(const char *name);
struct perf_pmu *perf_pmu__find_by_type(unsigned int type);
void pmu_add_sys_aliases(struct list_head *head, struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms,
struct parse_events_error *error);
......@@ -117,8 +120,12 @@ struct pmu_events_map *pmu_events_map__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
typedef int (*pmu_sys_event_iter_fn)(struct pmu_event *pe, void *data);
void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
int perf_pmu__caps_parse(struct perf_pmu *pmu);
int perf_pmu__match(char *pattern, char *name, char *tok);
#endif /* __PMU_H */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册