Commit 7d3cd1a2 authored by Wang ShaoBo, committed by Zheng Zengkai

arm64/mpam: Add wait queue for monitor alloc and free

hulk inclusion
category: feature
feature: ARM MPAM support
bugzilla: 48265
CVE: NA

--------------------------------

For MPAM, an rmid can do monitoring work only once a monitor resource
has been allocated to it, so we adopt a mechanism for dynamically
allocating and recycling monitor resources. This differs from the
Intel RDT approach, which creates a kworker thread that keeps checking
whether cache usage has fallen below an adjustable threshold before an
rmid is freed; we have found that this method hurts cpu utilization in
many cases, sometimes unacceptably.

Our method is simple. Because the number of monitors varies from
resource to resource, we maintain two lists: one stores rmids that
hold an exclusive monitor resource, and the other stores rmids that
share a monitor, whose shared monitor id is always 0. It works like
this: when a new rmid requests a resource monitor that is already in
use, we put that rmid at the tail of the latter list and temporarily
hand it the default monitor id 0 until someone releases an available
monitor; once the new rmid has obtained the monitor resources it needs
on all resources, it is moved to the exclusive list.

This implements LRU allocation of monitor resources and gives users
partial control over allocation and release. If the number of resctrl
groups can be kept bounded, or the user does not need to monitor too
many groups at the same time, this is the more appropriate deployment
model; it also avoids the risk of inaccurate monitoring results when
too many groups are monitored simultaneously.
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 0b16164d
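To make the scheme concrete, here is a minimal userspace sketch of the
free-map and wait-list behaviour described above. It is illustrative
only: NR_MON, the single free map, and the printed ids are assumptions
for one resource; the kernel code below keeps one map per resource and
a per-rmid array of monitor ids.

#include <stdio.h>
#include <strings.h>            /* ffs(), the same primitive the patch uses */

#define NR_MON 4                /* assumed number of monitors on one resource */

/* bit n set == monitor n is free; mon 0 is reserved as the shared id */
static unsigned int mon_free_map = ((1u << NR_MON) - 1) & ~1u;

static int mon_alloc(void)
{
	int mon = ffs(mon_free_map);

	if (mon == 0)
		return -1;      /* exhausted: the rmid waits on shared mon 0 */
	mon--;
	mon_free_map &= ~(1u << mon);
	return mon;
}

static void mon_free(int mon)
{
	mon_free_map |= 1u << mon;
}

int main(void)
{
	int a = mon_alloc();    /* gets mon 1 */
	int b = mon_alloc();    /* gets mon 2 */
	int c = mon_alloc();    /* gets mon 3 */
	int d = mon_alloc();    /* -1: parks on the wait list with mon 0 */

	printf("a=%d b=%d c=%d d=%d\n", a, b, c, d);

	mon_free(b);            /* rmid b is deassociated ... */
	d = mon_alloc();        /* ... and the waiter inherits mon 2 */
	printf("waiter promoted to mon %d\n", d);
	return 0;
}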
@@ -358,12 +358,6 @@ int resctrl_group_schemata_show(struct kernfs_open_file *of,
struct rdt_domain *mpam_find_domain(struct resctrl_resource *r, int id,
struct list_head **pos);
int resctrl_group_alloc_mon(struct rdtgroup *grp);
u16 mpam_resctrl_max_mon_num(void);
void mon_init(void);
extern int mpam_rmid_to_partid_pmg(int rmid, int *partid, int *pmg);
#endif /* _ASM_ARM64_MPAM_H */
@@ -53,7 +53,6 @@ struct mongroup {
struct rdtgroup *parent;
struct list_head crdtgrp_list;
u32 rmid;
u32 mon;
int init;
};
@@ -589,6 +589,7 @@ int resctrl_group_mondata_show(struct seq_file *m, void *arg)
struct list_head *head;
struct rdtgroup *entry;
hw_closid_t hw_closid;
hw_monid_t hw_monid;
enum resctrl_conf_type type = CDP_CODE;
resctrl_cdp_map(clos, rdtgrp->closid.reqpartid,
@@ -609,7 +610,10 @@ int resctrl_group_mondata_show(struct seq_file *m, void *arg)
return ret;
md.u.pmg = pmg;
md.u.mon = entry->mon.mon;
resctrl_cdp_map(mon, get_rmid_mon(entry->mon.rmid,
r->rid), type, hw_monid);
md.u.mon = hw_monid_val(hw_monid);
usage += resctrl_dom_mon_data(r, d, md.priv);
}
}
@@ -664,7 +668,8 @@ static int resctrl_mkdir_mondata_dom(struct kernfs_node *parent_kn,
/* monitoring uses reqpartid */
resctrl_cdp_map(clos, prgrp->closid.reqpartid, s->conf_type, hw_closid);
md.u.partid = hw_closid_val(hw_closid);
resctrl_cdp_map(mon, prgrp->mon.mon, s->conf_type, hw_monid);
resctrl_cdp_map(mon, get_rmid_mon(prgrp->mon.rmid, r->rid),
s->conf_type, hw_monid);
md.u.mon = hw_monid_val(hw_monid);
ret = mpam_rmid_to_partid_pmg(prgrp->mon.rmid, NULL, &pmg);
@@ -196,4 +196,8 @@ int __init mpam_resctrl_init(void);
int mpam_resctrl_set_default_cpu(unsigned int cpu);
void mpam_resctrl_clear_default_cpu(unsigned int cpu);
int assoc_rmid_with_mon(u32 rmid);
void deassoc_rmid_with_mon(u32 rmid);
u32 get_rmid_mon(u32 rmid, enum resctrl_resource_level rid);
int rmid_mon_ptrs_init(u32 nr_rmids);
#endif
@@ -34,51 +34,298 @@
*/
bool rdt_mon_capable;
/*
 * A simple LRU monitor allocation mechanism, each
 * monitor free map occupies two sections, one for
 * allocation and another for recording.
 */
struct rmid_entry {
u32 rmid;
u32 mon[RDT_NUM_RESOURCES];
struct list_head mon_exclusive_q;
struct list_head mon_wait_q;
};
/**
* @rmid_mon_exclusive_all List of allocated RMIDs that hold an
* exclusive mon on every enabled resource.
*/
static LIST_HEAD(rmid_mon_exclusive_all);
/**
* @rmid_mon_wait_all List of allocated RMIDs holding the default
* mon 0 while they wait for an exclusive mon to become available.
*/
static LIST_HEAD(rmid_mon_wait_all);
static u32 rmid_ptrs_len;
/**
* @rmid_entry - The entry in the mon list.
*/
static int mon_free_map[2];
static u8 alloc_idx, record_idx;
static struct rmid_entry *rmid_ptrs;
void mon_init(void)
static int mon_free_map[RDT_NUM_RESOURCES];
static void mon_init(void)
{
int num_mon;
u16 mon_num;
u32 times, flag;
struct mpam_resctrl_res *res;
struct resctrl_resource *r;
struct raw_resctrl_resource *rr;
num_mon = mpam_resctrl_max_mon_num();
hw_alloc_times_validate(times, flag);
/* for cdp on or off */
num_mon = rounddown(num_mon, times);
for_each_supported_resctrl_exports(res) {
r = &res->resctrl_res;
rr = r->res;
mon_free_map[0] = BIT_MASK(num_mon) - 1;
mon_free_map[1] = 0;
hw_alloc_times_validate(times, flag);
/* for cdp */
mon_num = rounddown(rr->num_mon, times);
mon_free_map[r->rid] = BIT_MASK(mon_num) - 1;
alloc_idx = 0;
record_idx = 1;
/* mon = 0 is reserved */
mon_free_map[r->rid] &= ~(BIT_MASK(times) - 1);
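/*
 * Illustrative example (assumed values): with rr->num_mon = 8 and
 * times = 2 (cdp on), mon_num = rounddown(8, 2) = 8, so the map
 * starts as 0xff; clearing the low BIT_MASK(2) - 1 = 0x3 bits then
 * reserves the shared mon 0 slots, leaving 0xfc allocatable.
 */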
}
}
int resctrl_lru_request_mon(void)
static u32 mon_alloc(enum resctrl_resource_level rid)
{
u32 mon = 0;
u32 times, flag;
hw_alloc_times_validate(times, flag);
mon = ffs(mon_free_map[alloc_idx]);
mon = ffs(mon_free_map[rid]);
if (mon == 0)
return -ENOSPC;
mon--;
mon_free_map[alloc_idx] &= ~(GENMASK(mon + times - 1, mon));
mon_free_map[record_idx] |= GENMASK(mon + times - 1, mon);
mon_free_map[rid] &= ~(GENMASK(mon + times - 1, mon));
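/*
 * Each allocation claims `times' consecutive bits, so with cdp
 * enabled the code and data views can each be given their own
 * monitor id out of a single request (cf. the rounddown above).
 */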
return mon;
}
static void mon_free(u32 mon, enum resctrl_resource_level rid)
{
u32 times, flag;
hw_alloc_times_validate(times, flag);
mon_free_map[rid] |= GENMASK(mon + times - 1, mon);
}
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
struct rmid_entry *entry;
if (rmid >= rmid_ptrs_len)
return NULL;
entry = &rmid_ptrs[rmid];
WARN_ON(entry->rmid != rmid);
return entry;
}
if (!mon_free_map[alloc_idx]) {
alloc_idx = record_idx;
record_idx ^= 0x1;
static void mon_wait_q_init(void)
{
INIT_LIST_HEAD(&rmid_mon_wait_all);
}
static void mon_exclusive_q_init(void)
{
INIT_LIST_HEAD(&rmid_mon_exclusive_all);
}
static void put_mon_wait_q(struct rmid_entry *entry)
{
list_add_tail(&entry->mon_wait_q, &rmid_mon_wait_all);
}
static void put_mon_exclusive_q(struct rmid_entry *entry)
{
list_add_tail(&entry->mon_exclusive_q, &rmid_mon_exclusive_all);
}
static void mon_wait_q_del(struct rmid_entry *entry)
{
list_del(&entry->mon_wait_q);
}
static void mon_exclusive_q_del(struct rmid_entry *entry)
{
list_del(&entry->mon_exclusive_q);
}
static int is_mon_wait_q_exist(u32 rmid)
{
struct rmid_entry *entry;
list_for_each_entry(entry, &rmid_mon_wait_all, mon_wait_q) {
if (entry->rmid == rmid)
return 1;
}
return mon;
return 0;
}
static int is_mon_exclusive_q_exist(u32 rmid)
{
struct rmid_entry *entry;
list_for_each_entry(entry, &rmid_mon_exclusive_all, mon_exclusive_q) {
if (entry->rmid == rmid)
return 1;
}
return 0;
}
static int is_rmid_mon_wait_q_exist(u32 rmid)
{
struct rmid_entry *entry;
list_for_each_entry(entry, &rmid_mon_wait_all, mon_wait_q) {
if (entry->rmid == rmid)
return 1;
}
return 0;
}
int rmid_mon_ptrs_init(u32 nr_rmids)
{
struct rmid_entry *entry = NULL;
int i;
if (rmid_ptrs)
kfree(rmid_ptrs);
rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
if (!rmid_ptrs)
return -ENOMEM;
rmid_ptrs_len = nr_rmids;
for (i = 0; i < nr_rmids; i++) {
entry = &rmid_ptrs[i];
entry->rmid = i;
}
mon_exclusive_q_init();
mon_wait_q_init();
/*
 * RMID 0 is special and is always allocated. It is used for
 * monitoring all tasks.
 */
entry = __rmid_entry(0);
if (!entry) {
kfree(rmid_ptrs);
rmid_ptrs = NULL;
return -EINVAL;
}
put_mon_exclusive_q(entry);
mon_init();
return 0;
}
int assoc_rmid_with_mon(u32 rmid)
{
int mon;
bool has_mon_wait = false;
struct rmid_entry *entry;
struct mpam_resctrl_res *res;
struct resctrl_resource *r;
if (is_mon_exclusive_q_exist(rmid) ||
is_rmid_mon_wait_q_exist(rmid))
return -EINVAL;
entry = __rmid_entry(rmid);
if (!entry)
return -EINVAL;
for_each_supported_resctrl_exports(res) {
r = &res->resctrl_res;
if (!r->mon_enabled)
continue;
mon = mon_alloc(r->rid);
if (mon < 0) {
entry->mon[r->rid] = 0;
has_mon_wait = true;
} else {
entry->mon[r->rid] = mon;
}
}
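/*
 * An rmid that could not get a private monitor on every enabled
 * resource parks on the wait list with the shared mon 0 until
 * deassoc_rmid_with_mon() hands a freed monitor over to it.
 */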
if (has_mon_wait)
put_mon_wait_q(entry);
else
put_mon_exclusive_q(entry);
return 0;
}
void deassoc_rmid_with_mon(u32 rmid)
{
bool has_mon_wait;
struct mpam_resctrl_res *res;
struct resctrl_resource *r;
struct rmid_entry *entry = __rmid_entry(rmid);
struct rmid_entry *wait, *tmp;
if (!entry)
return;
if (!is_mon_wait_q_exist(rmid) &&
!is_mon_exclusive_q_exist(rmid))
return;
if (is_mon_wait_q_exist(rmid))
mon_wait_q_del(entry);
else
mon_exclusive_q_del(entry);
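/*
 * Offer this rmid's monitors to waiters before returning them to
 * the free map; a waiter that now holds a monitor on every enabled
 * resource is promoted from the wait list to the exclusive list.
 */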
list_for_each_entry_safe(wait, tmp, &rmid_mon_wait_all, mon_wait_q) {
has_mon_wait = false;
for_each_supported_resctrl_exports(res) {
r = &res->resctrl_res;
if (!r->mon_enabled)
continue;
if (!wait->mon[r->rid]) {
wait->mon[r->rid] = entry->mon[r->rid];
entry->mon[r->rid] = 0;
}
if (!wait->mon[r->rid])
has_mon_wait = true;
}
if (!has_mon_wait) {
mon_wait_q_del(wait);
put_mon_exclusive_q(wait);
}
}
for_each_supported_resctrl_exports(res) {
r = &res->resctrl_res;
if (!r->mon_enabled)
continue;
if (entry->mon[r->rid])
mon_free(entry->mon[r->rid], r->rid);
}
}
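/*
 * Look up the monitor backing @rmid on resource @rid. An rmid on
 * neither list returns the shared monitor 0; a waiting rmid returns
 * 0 for the resources it is still queued on.
 */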
u32 get_rmid_mon(u32 rmid, enum resctrl_resource_level rid)
{
struct rmid_entry *entry = __rmid_entry(rmid);
if (!entry)
return 0;
if (!is_mon_wait_q_exist(rmid) && !is_mon_exclusive_q_exist(rmid))
return 0;
return entry->mon[rid];
}
@@ -64,11 +64,6 @@ int max_name_width, max_data_width;
*/
bool rdt_alloc_capable;
/*
* Indicate the max number of monitor supported.
*/
static u32 max_mon_num;
/*
* Indicate if had mount cdpl2/cdpl3 option.
*/
@@ -774,8 +769,11 @@ static int rmid_remap_matrix_init(void)
STRIDE_CHK_AND_WARN(stride);
return 0;
ret = rmid_mon_ptrs_init(rmid_remap_matrix.nr_usage);
if (ret)
goto out;
return 0;
out:
return ret;
}
@@ -788,13 +786,7 @@ int resctrl_id_init(void)
if (ret)
return ret;
ret = rmid_remap_matrix_init();
if (ret)
return ret;
mon_init();
return 0;
return rmid_remap_matrix_init();
}
static int is_rmid_valid(int rmid)
@@ -869,6 +861,10 @@ static int __rmid_alloc(int partid)
goto out;
}
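/* attach per-resource monitors (or queue the rmid as a waiter) */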
ret = assoc_rmid_with_mon(rmid[0]);
if (ret)
goto out;
return rmid[0];
out:
@@ -908,6 +904,8 @@ void rmid_free(int rmid)
}
STRIDE_CHK_AND_WARN(stride);
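/* release the monitors; waiters inherit them before the free map does */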
deassoc_rmid_with_mon(rmid);
}
int mpam_rmid_to_partid_pmg(int rmid, int *partid, int *pmg)
@@ -1984,25 +1982,3 @@ void resctrl_resource_reset(void)
*/
resctrl_cdp_enabled = false;
}
u16 mpam_resctrl_max_mon_num(void)
{
struct mpam_resctrl_res *res;
u16 mon_num = USHRT_MAX;
struct raw_resctrl_resource *rr;
if (max_mon_num)
return max_mon_num;
for_each_supported_resctrl_exports(res) {
rr = res->resctrl_res.res;
mon_num = min(mon_num, rr->num_mon);
}
if (mon_num == USHRT_MAX)
mon_num = 0;
max_mon_num = mon_num;
return mon_num;
}
@@ -353,25 +353,14 @@ static void mkdir_mondata_all_prepare_clean(struct resctrl_group *prgrp)
static int mkdir_mondata_all_prepare(struct resctrl_group *rdtgrp)
{
int ret = 0;
int mon;
struct resctrl_group *prgrp;
mon = resctrl_lru_request_mon();
if (mon < 0) {
rdt_last_cmd_puts("out of monitors\n");
ret = -EINVAL;
goto out;
}
rdtgrp->mon.mon = mon;
if (rdtgrp->type == RDTMON_GROUP) {
prgrp = rdtgrp->mon.parent;
rdtgrp->closid.intpartid = prgrp->closid.intpartid;
}
out:
return ret;
return 0;
}
/*