提交 89691ba7 编写于 作者: X Xie XiuQi

arm64/mpam: support pmg alloc/free

hulk inclusion
category: feature
bugzilla: 5510
CVE: NA
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 be2167d2
......@@ -321,6 +321,7 @@ struct raw_resctrl_resource {
int num_partid;
u32 default_ctrl;
void (*msr_update) (struct rdt_domain *d, int partid);
u64 (*msr_read) (struct rdt_domain *d, int partid);
int data_width;
const char *format_str;
int (*parse_ctrlval) (char *buf, struct raw_resctrl_resource *r,
......
......@@ -57,8 +57,10 @@
#define BWA_WD 6 /* hard code for P680 */
#define MBW_MAX_MASK 0xFC00
#define MBW_MAX_HARDLIM BIT(31)
/* [FIXME] hard code for hardlim */
#define MBW_MAX_SET(v) (MBW_MAX_HARDLIM|((v) << (15 - BWA_WD)))
#define MBW_MAX_GET(v) (((v) & MBW_MAX_MASK) >> (15 - BWA_WD))
/*
* emulate the mpam nodes
* These should be reported by ACPI MPAM Table.
......
......@@ -19,9 +19,11 @@ static inline void free_mon_id(u32 id)
free_rmid(id);
}
void pmg_init(void);
/*
 * Initialize both resctrl ID allocators: the closid (partition ID)
 * free pool and the pmg (performance monitor group) free pool.
 * Called once during resctrl setup before any group allocation.
 */
static inline void resctrl_id_init(void)
{
closid_init();
pmg_init();
}
static inline int resctrl_id_alloc(void)
......
......@@ -162,21 +162,27 @@ cat_wrmsr(struct rdt_domain *d, int partid);
static void
bw_wrmsr(struct rdt_domain *d, int partid);
u64 cat_rdmsr(struct rdt_domain *d, int partid);
u64 bw_rdmsr(struct rdt_domain *d, int partid);
#define domain_init(id) LIST_HEAD_INIT(resctrl_resources_all[id].domains)
struct raw_resctrl_resource raw_resctrl_resources_all[] = {
[MPAM_RESOURCE_SMMU] = {
.msr_update = cat_wrmsr,
.msr_read = cat_rdmsr,
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
},
[MPAM_RESOURCE_CACHE] = {
.msr_update = cat_wrmsr,
.msr_read = cat_rdmsr,
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
},
[MPAM_RESOURCE_MC] = {
.msr_update = bw_wrmsr,
.msr_read = bw_rdmsr,
.parse_ctrlval = parse_cbm, /* [FIXME] add parse_bw() helper */
.format_str = "%d=%0*x",
},
......@@ -225,6 +231,22 @@ bw_wrmsr(struct rdt_domain *d, int partid)
mpam_writel(val, d->base + MPAMCFG_MBW_MAX);
}
/*
 * Read back the cache portion bitmap (CPBM) configured for @partid
 * on domain @d.
 *
 * The partid must first be latched into MPAMCFG_PART_SEL so that the
 * following MPAMCFG_CPBM access targets that partition; the write/read
 * order here is therefore required by the hardware interface.
 */
u64 cat_rdmsr(struct rdt_domain *d, int partid)
{
mpam_writel(partid, d->base + MPAMCFG_PART_SEL);
return mpam_readl(d->base + MPAMCFG_CPBM);
}
/*
 * Read back the memory-bandwidth maximum configured for @partid on
 * domain @d.
 *
 * MPAMCFG_PART_SEL selects the partition; the raw MPAMCFG_MBW_MAX
 * value is then decoded to the bandwidth fraction via MBW_MAX_GET()
 * (strips the hardlim bit and shifts out the unused low bits).
 */
u64 bw_rdmsr(struct rdt_domain *d, int partid)
{
u64 max;
/* select the partition before reading its MBW_MAX register */
mpam_writel(partid, d->base + MPAMCFG_PART_SEL);
max = mpam_readl(d->base + MPAMCFG_MBW_MAX);
return MBW_MAX_GET(max);
}
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
......@@ -271,10 +293,11 @@ void closid_free(int closid)
/*
 * CPU hotplug "online" callback: add the newly-onlined CPU to the
 * default resctrl group's cpumask so it runs with the default
 * partition/monitor IDs until explicitly moved to another group.
 * Always returns 0 (success) as required by the hotplug framework.
 */
static int mpam_online_cpu(unsigned int cpu)
{
pr_info("CPU %2d: online cpu and enable mpam\n", cpu);
cpumask_set_cpu(cpu, &resctrl_group_default.cpu_mask);
return 0;
}
/* [FIXME] remove related resource when cpu offline */
static int mpam_offline_cpu(unsigned int cpu)
{
pr_info("offline cpu\n");
......@@ -540,9 +563,68 @@ static int resctrl_group_cpus_show(struct kernfs_open_file *of,
return ret;
}
/*
 * Remove the CPUs in @m from control group @r and restrict every
 * child mon group under @r accordingly: a mon group may only own
 * CPUs that its parent still owns.
 */
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
struct rdtgroup *crgrp;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
/* update the child mon group masks as well*/
list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
/*
 * Rewrite the CPU mask of control group @rdtgrp to @newmask.
 *
 * Dropped CPUs fall back to the default group; added CPUs are pulled
 * out of whichever group (and child mon groups) currently owns them.
 * Per-cpu closid/rmid state is updated at each ownership change.
 *
 * @tmpmask and @tmpmask1 are caller-provided scratch masks (avoids
 * allocating cpumasks here). Returns 0 on success or -EINVAL if the
 * write would remove CPUs from the default group.
 */
int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
struct rdtgroup *r, *crgrp;
struct list_head *head;
/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
if (cpumask_weight(tmpmask)) {
/* Can't drop from default group */
if (rdtgrp == &resctrl_group_default) {
rdt_last_cmd_puts("Can't drop CPUs from default group\n");
return -EINVAL;
}
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&resctrl_group_default.cpu_mask,
&resctrl_group_default.cpu_mask, tmpmask);
update_closid_rmid(tmpmask, &resctrl_group_default);
}
/*
 * If we added cpus, remove them from previous group and
 * the prev group's child groups that owned them
 * and update per-cpu closid/rmid.
 */
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
/* walk every other group and steal any overlap of the added cpus */
list_for_each_entry(r, &resctrl_all_groups, resctrl_group_list) {
if (r == rdtgrp)
continue;
cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
if (cpumask_weight(tmpmask1))
cpumask_rdtgrp_clear(r, tmpmask1);
}
update_closid_rmid(tmpmask, rdtgrp);
}
/* Done pushing/pulling - update this group with new mask */
cpumask_copy(&rdtgrp->cpu_mask, newmask);
/*
 * Clear child mon group masks since there is a new parent mask
 * now and update the rmid for the cpus the child lost.
 */
head = &rdtgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
update_closid_rmid(tmpmask, rdtgrp);
cpumask_clear(&crgrp->cpu_mask);
}
return 0;
}
......
......@@ -217,7 +217,7 @@ static void show_doms(struct seq_file *s, struct resctrl_resource *r, int partid
if (sep)
seq_puts(s, ";");
seq_printf(s, rr->format_str, dom->id, max_data_width,
dom->ctrl_val[partid]);
rr->msr_read(dom, partid));
sep = true;
}
seq_puts(s, "\n");
......
......@@ -35,6 +35,36 @@
*/
bool rdt_mon_capable;
static int pmg_free_map;
/*
 * Initialize the pmg (performance monitor group) allocator.
 * pmg_free_map holds one bit per pmg; a set bit means "free".
 */
void pmg_init(void)
{
	/* [FIXME] hard-coded; should come from the MPAM ID registers */
	int pmg_max = 16;

	/*
	 * Mask of the low pmg_max bits. Avoid BIT_MASK() here: it expands
	 * to 1UL << (nr % BITS_PER_LONG) (meant for bitmap word indexing),
	 * which only coincidentally gives the right value for small
	 * pmg_max and silently yields 0 if pmg_max reaches BITS_PER_LONG.
	 */
	pmg_free_map = (1U << pmg_max) - 1;

	/* pmg 0 is always reserved for the default group */
	pmg_free_map &= ~1;
}
/*
 * Allocate the lowest-numbered free pmg from pmg_free_map.
 * Returns the pmg number on success, or -ENOSPC when none are free.
 */
int alloc_pmg(void)
{
	u32 pos = ffs(pmg_free_map);

	if (!pos)
		return -ENOSPC;		/* free map exhausted */

	pos--;				/* ffs() is 1-based */
	pmg_free_map &= ~(1 << pos);	/* mark it allocated */
	return pos;
}
/*
 * Return @pmg to the free pool by setting its bit in pmg_free_map.
 * No validation is performed; the caller must pass a pmg previously
 * obtained from alloc_pmg().
 * NOTE(review): a pmg >= 31 would make "1 << pmg" overflow the int
 * mask (UB) — confirm callers never pass an out-of-range value.
 */
void free_pmg(u32 pmg)
{
pmg_free_map |= 1 << pmg;
}
/*
* As of now the RMIDs allocation is global.
* However we keep track of which packages the RMIDs
......@@ -42,10 +72,10 @@ bool rdt_mon_capable;
*/
/*
 * Allocate an RMID. On arm64 MPAM the RMID space maps 1:1 onto the
 * pmg space, so simply delegate to the pmg allocator.
 * Returns a free pmg number, or -ENOSPC if none are available.
 *
 * (Removed the leftover "return 0;" line that made the alloc_pmg()
 * call unreachable.)
 */
int alloc_rmid(void)
{
	return alloc_pmg();
}
/*
 * Free an RMID. RMIDs map 1:1 onto MPAM pmgs, so this just returns
 * the pmg to the allocator.
 * (Removed the duplicated old-signature line "void free_rmid(u32 rmid)"
 * that produced two conflicting definitions of this function.)
 */
void free_rmid(u32 pmg)
{
	free_pmg(pmg);
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册