Commit c523a17f authored by Kan Liang, committed by Yunying Sun

perf/x86/intel/uncore: Generic support for the MMIO type of uncore blocks

mainline inclusion
from mainline-v5.13-rc1
commit c4c55e36
category: feature
feature: SPR PMU uncore support
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I5BECO

Intel-SIG: commit c4c55e36 perf/x86/intel/uncore: Generic support
for the MMIO type of uncore blocks
This commit is backported for SPR PMU uncore support.

-------------------------------------

The discovery table provides the generic uncore block information
for the MMIO type of uncore blocks, which is good enough to provide
basic uncore support.
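
Concretely, the pieces of per-type discovery data that the generic MMIO path consumes look roughly like the sketch below (the field names follow those referenced in the uncore_discovery hunks further down; the trimmed layout and the types are assumptions of this illustration, not the real structure definition):

  #include <linux/types.h>

  /* Trimmed, illustrative view of the per-type data parsed from the discovery table. */
  struct intel_uncore_discovery_type_sketch {
          u64 box_ctrl;           /* box control: BAR address + box control offset */
          u64 *box_ctrl_die;      /* per-die box control values (-> uncore->box_ctls) */
          u64 *box_offset;        /* per-box offsets (-> uncore->mmio_offsets) */
          u64 ctl_offset;         /* event control register offset */
          u64 ctr_offset;         /* event counter register offset */
  };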

The box control field is composed of the BAR address and box control
offset. When initializing the uncore blocks, perf should ioremap the
address from the box control field.
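
As a minimal sketch of that flow (it mirrors the generic_uncore_mmio_box_ctl() and intel_generic_uncore_mmio_init_box() helpers added in the diff below; the wrapper function and its name are hypothetical):

  #include <linux/io.h>
  #include <linux/types.h>

  /*
   * Resolve and map one MMIO uncore box: the per-die box control value from
   * the discovery table plus the per-box offset gives the physical address
   * of the box's register window, which is then ioremap()'d.
   */
  static void __iomem *map_mmio_box(resource_size_t die_box_ctl, /* box_ctls[dieid] */
                                    unsigned int box_offset,     /* mmio_offsets[pmu_idx] */
                                    size_t map_size)             /* UNCORE_GENERIC_MMIO_SIZE */
  {
          resource_size_t addr = die_box_ctl + box_offset;

          /* Box registers are then programmed via writel()/readl() on this mapping. */
          return ioremap(addr, map_size);
  }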

Implement the generic support for the MMIO type of uncore blocks.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1616003977-90612-6-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Yunying Sun <yunying.sun@intel.com>
Parent 8396f0ec
@@ -1750,6 +1750,7 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
        .cpu_init = intel_uncore_generic_uncore_cpu_init,
        .pci_init = intel_uncore_generic_uncore_pci_init,
        .mmio_init = intel_uncore_generic_uncore_mmio_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
@@ -70,6 +70,7 @@ struct intel_uncore_type {
        union {
                unsigned *msr_offsets;
                unsigned *pci_offsets;
                unsigned *mmio_offsets;
        };
        unsigned *box_ids;
        struct event_constraint unconstrainted;
@@ -442,6 +442,90 @@ static struct intel_uncore_ops generic_uncore_pci_ops = {
        .read_counter = intel_generic_uncore_pci_read_counter,
};

#define UNCORE_GENERIC_MMIO_SIZE 0x4000

static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
        struct intel_uncore_type *type = box->pmu->type;

        if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
                return 0;

        return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
}

static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
        unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
        struct intel_uncore_type *type = box->pmu->type;
        resource_size_t addr;

        if (!box_ctl) {
                pr_warn("Uncore type %d box %d: Invalid box control address.\n",
                        type->type_id, type->box_ids[box->pmu->pmu_idx]);
                return;
        }

        addr = box_ctl;
        box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
        if (!box->io_addr) {
                pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
                        type->type_id, type->box_ids[box->pmu->pmu_idx],
                        (unsigned long long)addr);
                return;
        }

        writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
        if (!box->io_addr)
                return;

        writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
        if (!box->io_addr)
                return;

        writel(0, box->io_addr);
}

static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
                                                   struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!box->io_addr)
                return;

        writel(hwc->config, box->io_addr + hwc->config_base);
}

static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
                                                    struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!box->io_addr)
                return;

        writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
        .init_box = intel_generic_uncore_mmio_init_box,
        .exit_box = uncore_mmio_exit_box,
        .disable_box = intel_generic_uncore_mmio_disable_box,
        .enable_box = intel_generic_uncore_mmio_enable_box,
        .disable_event = intel_generic_uncore_mmio_disable_event,
        .enable_event = intel_generic_uncore_mmio_enable_event,
        .read_counter = uncore_mmio_read_counter,
};

static bool uncore_update_uncore_type(enum uncore_access_type type_id,
                                      struct intel_uncore_type *uncore,
                                      struct intel_uncore_discovery_type *type)
@@ -468,6 +552,15 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
                uncore->box_ctls = type->box_ctrl_die;
                uncore->pci_offsets = type->box_offset;
                break;
        case UNCORE_ACCESS_MMIO:
                uncore->ops = &generic_uncore_mmio_ops;
                uncore->perf_ctr = (unsigned int)type->ctr_offset;
                uncore->event_ctl = (unsigned int)type->ctl_offset;
                uncore->box_ctl = (unsigned int)type->box_ctrl;
                uncore->box_ctls = type->box_ctrl_die;
                uncore->mmio_offsets = type->box_offset;
                uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
                break;
        default:
                return false;
        }
@@ -522,3 +615,8 @@ int intel_uncore_generic_uncore_pci_init(void)
        return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
        uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO);
}
@@ -128,3 +128,4 @@ bool intel_uncore_has_discovery_tables(void);
void intel_uncore_clear_discovery_tables(void);
void intel_uncore_generic_uncore_cpu_init(void);
int intel_uncore_generic_uncore_pci_init(void);
void intel_uncore_generic_uncore_mmio_init(void);