Commit 0fcdc966 authored by Xie XiuQi

arm64/mpam: call mpam_sched_in on context switch

hulk inclusion
category: feature
bugzilla: 5510
CVE: NA

Make __mpam_sched_in() an out-of-line function and call mpam_sched_in()
from __switch_to(), so that the incoming task's PARTID/PMG are
programmed into the MPAM system registers on every context switch.
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 583a93c7
@@ -6,8 +6,6 @@
#include <linux/sched.h>
#include <linux/jump_label.h>
#include <asm/mpam.h>
/**
 * struct intel_pqr_state - State cache for the PQR MSR
 * @cur_rmid: The cached Resource Monitoring ID
@@ -32,64 +30,8 @@ struct intel_pqr_state {
DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
/*
 * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
 *
 * Following considerations are made so that this has minimal impact
 * on scheduler hot path:
 * - This will stay as no-op unless we are running on an Intel SKU
 *   which supports resource control or monitoring and we enable by
 *   mounting the resctrl file system.
 * - Caches the per cpu CLOSid/RMID values and does the MSR write only
 *   when a task with a different CLOSid/RMID is scheduled in.
 * - We allocate RMIDs/CLOSids globally in order to keep this as
 *   simple as possible.
 * Must be called with preemption disabled.
 */
static void __mpam_sched_in(void)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
	u64 partid = state->default_closid;
	u64 pmg = state->default_rmid;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&resctrl_alloc_enable_key)) {
		if (current->closid)
			partid = current->closid;
	}

	if (static_branch_likely(&resctrl_mon_enable_key)) {
		if (current->rmid)
			pmg = current->rmid;
	}

	if (partid != state->cur_closid || pmg != state->cur_rmid) {
		u64 reg;

		state->cur_closid = partid;
		state->cur_rmid = pmg;

		/* set in EL0 */
		reg = mpam_read_sysreg_s(SYS_MPAM0_EL1, "SYS_MPAM0_EL1");
		reg = PARTID_SET(reg, partid);
		reg = PMG_SET(reg, pmg);
		mpam_write_sysreg_s(reg, SYS_MPAM0_EL1, "SYS_MPAM0_EL1");

		/* set in EL1 */
		reg = mpam_read_sysreg_s(SYS_MPAM1_EL1, "SYS_MPAM1_EL1");
		reg = PARTID_SET(reg, partid);
		reg = PMG_SET(reg, pmg);
		mpam_write_sysreg_s(reg, SYS_MPAM1_EL1, "SYS_MPAM1_EL1");

		/* [FIXME] set in EL2 (hard-coded for VHE enabled) */
		reg = mpam_read_sysreg_s(SYS_MPAM2_EL2, "SYS_MPAM2_EL2");
		reg = PARTID_SET(reg, partid);
		reg = PMG_SET(reg, pmg);
		mpam_write_sysreg_s(reg, SYS_MPAM2_EL2, "SYS_MPAM2_EL2");
	}
}
extern void __mpam_sched_in(void);
DECLARE_STATIC_KEY_FALSE(resctrl_enable_key);
static inline void mpam_sched_in(void)
{
......
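The body of mpam_sched_in() is folded out of the diff above. Judging by the
resctrl_enable_key declared next to it and the x86 resctrl pattern this code
follows, it is most likely a static-key guard around the out-of-line call; a
minimal sketch of that assumption (not the actual elided body):

static inline void mpam_sched_in(void)
{
	/* Compiles to a patched-out branch until resctrl is enabled. */
	if (static_branch_likely(&resctrl_enable_key))
		__mpam_sched_in();
}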
@@ -926,3 +926,56 @@ static int __init mpam_late_init(void)
}
late_initcall(mpam_late_init);
/*
 * __mpam_sched_in() - Writes the task's CLOSid/RMID (MPAM PARTID/PMG)
 * to the MPAM system registers.
 *
 * Following considerations are made so that this has minimal impact
 * on the scheduler hot path:
 * - This will stay a no-op unless we are running on a CPU which
 *   supports MPAM resource control or monitoring and we enable it by
 *   mounting the resctrl file system.
 * - Caches the per-cpu CLOSid/RMID values and does the sysreg writes
 *   only when a task with a different CLOSid/RMID is scheduled in.
 * - We allocate RMIDs/CLOSids globally in order to keep this as
 *   simple as possible.
 * Must be called with preemption disabled.
 */
void __mpam_sched_in(void)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
	u64 partid = state->default_closid;
	u64 pmg = state->default_rmid;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&resctrl_alloc_enable_key)) {
		if (current->closid)
			partid = current->closid;
	}

	if (static_branch_likely(&resctrl_mon_enable_key)) {
		if (current->rmid)
			pmg = current->rmid;
	}

	if (partid != state->cur_closid || pmg != state->cur_rmid) {
		u64 reg;

		state->cur_closid = partid;
		state->cur_rmid = pmg;

		/* set in EL0 */
		reg = mpam_read_sysreg_s(SYS_MPAM0_EL1, "SYS_MPAM0_EL1");
		reg = PARTID_SET(reg, partid);
		reg = PMG_SET(reg, pmg);
		mpam_write_sysreg_s(reg, SYS_MPAM0_EL1, "SYS_MPAM0_EL1");

		/* set in EL1 */
		reg = mpam_read_sysreg_s(SYS_MPAM1_EL1, "SYS_MPAM1_EL1");
		reg = PARTID_SET(reg, partid);
		reg = PMG_SET(reg, pmg);
		mpam_write_sysreg_s(reg, SYS_MPAM1_EL1, "SYS_MPAM1_EL1");
	}
}
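PARTID_SET() and PMG_SET() are defined elsewhere in the tree and are not part
of this diff. A hedged sketch of what they plausibly look like, assuming the
MPAMn_ELx field layout from the Arm MPAM architecture specification (PARTID_D
at bits [15:0], PARTID_I at [31:16], PMG_D at [39:32], PMG_I at [47:40]); the
in-tree definitions may differ:

/* Hypothetical field helpers, for illustration only. */
#define PARTID_D_SHIFT	0
#define PARTID_I_SHIFT	16
#define PMG_D_SHIFT	32
#define PMG_I_SHIFT	40
#define PARTID_MASK	0xffffULL
#define PMG_MASK	0xffULL

/* Replace both the data- and instruction-side PARTID fields in reg. */
#define PARTID_SET(reg, partid)						\
	(((reg) & ~((PARTID_MASK << PARTID_D_SHIFT) |			\
		    (PARTID_MASK << PARTID_I_SHIFT))) |			\
	 ((u64)(partid) << PARTID_D_SHIFT) |				\
	 ((u64)(partid) << PARTID_I_SHIFT))

/* Replace both the data- and instruction-side PMG fields in reg. */
#define PMG_SET(reg, pmg)						\
	(((reg) & ~((PMG_MASK << PMG_D_SHIFT) |				\
		    (PMG_MASK << PMG_I_SHIFT))) |			\
	 ((u64)(pmg) << PMG_D_SHIFT) |					\
	 ((u64)(pmg) << PMG_I_SHIFT))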
@@ -58,6 +58,7 @@
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#include <asm/mpam_sched.h>
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
@@ -437,6 +438,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	mpam_sched_in();

	return last;
}
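Note where the hook lands: mpam_sched_in() runs after cpu_switch_to() returns,
i.e. already on the incoming task's stack with current updated, so the
PARTID/PMG written to the MPAM system registers are those of the task about to
resume. This mirrors where x86 calls its resctrl scheduling hook from
__switch_to().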