Commit db74dc13 authored by Hui Tang, committed by Zheng Zengkai

bpf: programmable: Fix build error of 'stack exceeds 512 bytes'

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5RMFU
CVE: NA

--------------------------------

1. Change the arg type of 'bpf_get_cpumask_info' so that the bpf
   program stack no longer exceeds 512 bytes (a sketch of this
   pattern follows the list below).
2. Fix the back-edge verifier error in the sample 'sched_select_core'.
3. Fix the 'loop too complex' verifier error in the sample 'sched_select_core'.
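
The stack fix in item 1 follows a common BPF pattern: scratch memory that is
too large for the 512-byte program stack is kept in a single-entry per-cpu
array map, and only a pointer to the map entry is handed to the helper. Below
is a minimal, hypothetical sketch of that pattern; the struct, map name, and
program section are made up for illustration and are not part of this patch.

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct big_buf {                        /* stand-in for a large struct */
          char data[1024];                /* larger than the 512-byte BPF stack */
  };

  struct {
          __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
          __type(key, __u32);
          __type(value, struct big_buf);
          __uint(max_entries, 1);
  } scratch_map SEC(".maps");

  SEC("tp/sched/sched_switch")
  int use_scratch(void *ctx)
  {
          __u32 key = 0;
          struct big_buf *buf;

          /* Declaring 'struct big_buf buf;' here would blow the 512-byte
           * stack limit; a looked-up map entry is not on the stack.
           */
          buf = bpf_map_lookup_elem(&scratch_map, &key);
          if (!buf)
                  return 0;

          buf->data[0] = 1;               /* use the map entry as scratch */
          return 0;
  }

  char _license[] SEC("license") = "GPL";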

Changes in v2:
  Move cpu initialization out of the for loop.
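
For items 2 and 3 (and the v2 note above), the samples replace open-coded
cpumask iteration with a loop whose trip count is bounded by a constant and
whose cursor is initialized once before the loop, which is the shape the
verifier accepts. A rough sketch of that shape, reusing the libbpf_sched.h
helpers this patch touches (the function name and bound below are
illustrative only, not part of the patch):

  #define LOOP_MAX 1024                   /* assumed bound; bigger values may fail verification */

  static __always_inline int first_idle_cpu(struct task_struct *p)
  {
          int cpu = -1;                   /* cursor initialized outside the loop (v2 change) */
          int i;

          for (i = 0; i < LOOP_MAX; i++) {
                  cpu = libbpf_cpumask_next(cpu, (void *)getVal(p->cpus_ptr));
                  if (cpu >= libbpf_nr_cpus_ids())
                          break;
                  if (libbpf_available_idle_cpu(cpu))
                          return cpu;
          }
          return -1;
  }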

Fixes: 1bf0417b ("sched: programmable: Add helper function for cpu topo...")
Fixes: 2c1189e3 ("samples:bpf: Add samples for cfs select core")
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Parent eb51ccd7
@@ -3903,7 +3903,7 @@ union bpf_attr {
  *	Return
  *		0 on success, or a negative error in case of failure.
  *
- * int bpf_get_cpumask_info(struct bpf_cpumask_info *cpus, int len)
+ * int bpf_get_cpumask_info(struct bpf_map *map, struct bpf_cpumask_info *cpus)
  *	Description
  *		Get system cpus returned in *cpus*.
  *	Return
......
@@ -70,10 +70,9 @@ const struct bpf_func_proto bpf_init_cpu_topology_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_2(bpf_get_cpumask_info, struct bpf_cpumask_info *, cpus,
-	   int, len)
+BPF_CALL_2(bpf_get_cpumask_info, struct bpf_map *, map, struct bpf_cpumask_info *, cpus)
 {
-	if (len != sizeof(*cpus))
+	if (!cpus)
 		return -EINVAL;
 
 	cpumask_copy(&cpus->cpu_possible_cpumask, cpu_possible_mask);
@@ -92,6 +91,6 @@ const struct bpf_func_proto bpf_get_cpumask_info_proto = {
 	.func		= bpf_get_cpumask_info,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
-	.arg2_type	= ARG_CONST_SIZE,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
 };
@@ -62,7 +62,7 @@ struct tag_info {
 
 struct tag_info tag_tbl[] = {
 	{TAG_NONE, ""},
-	{TAG_ID(1), "0-3"},
+	{TAG_ID(1), "0-1"},
 	{TAG_ID(2), "4-7"},
 	{TAG_MAX, ""},
 };
@@ -94,13 +94,17 @@ static struct cpumask *select_better_cpus(struct task_struct *p,
 	long min_util = INT_MIN;
 	struct task_group *tg;
 	long spare;
-	int cpu;
+	int cpu, i;
 
 	if (!prefer_cpus_valid(prefer_cpus, (void *)getVal(p->cpus_ptr)))
 		return (void *)getVal(p->cpus_ptr);
 
 	tg = p->sched_task_group;
-	libbpf_for_each_cpu(cpu, prefer_cpus) {
+	for (i = 0, cpu = -1; i < BPF_SCHED_LOOP_MAX; i++) {
+		cpu = libbpf_cpumask_next(cpu, (void *)getVal(prefer_cpus));
+		if (cpu >= libbpf_nr_cpus_ids())
+			break;
+
 		if (idlest_cpu && libbpf_available_idle_cpu(cpu)) {
 			*idlest_cpu = cpu;
 		} else if (idlest_cpu) {
@@ -159,9 +163,14 @@ int BPF_PROG(cfs_select_cpu_range, struct sched_migrate_ctx *h_ctx)
 SEC("sched/cfs_select_rq_exit")
 int BPF_PROG(cfs_select_cpu_range_exit, struct sched_migrate_ctx *h_ctx)
 {
+	struct task_struct *p = getVal(h_ctx->task);
+	long tag = getVal(p->tag);
 	int *idlest_cpu;
 	int key = 0;
 
+	if (tag <= TAG_NONE || tag >= TAG_MAX)
+		return SELECT_RQ_EXIT_CPU_VALID;
+
 	idlest_cpu = bpf_map_lookup_elem(&map_idlest_cpu, &key);
 	if (!idlest_cpu) {
 		libbpf_sched_set_task_cpus_ptr(h_ctx, (void *)getVal(h_ctx->cpus_allowed));
@@ -186,7 +195,7 @@ static int find_idlest_cpu(struct task_struct *p, int parent)
 	int cpu;
 	int i;
 
-	for (i = 0, cpu = -1; i < NR_CPUS; i++) {
+	for (i = 0, cpu = -1; i < BPF_SCHED_LOOP_MAX; i++) {
 		cpu = libbpf_cpumask_next(cpu, (void *)getVal(p->cpus_ptr));
 		if (cpu >= libbpf_nr_cpus_ids())
 			break;
@@ -203,17 +212,26 @@ static int find_idlest_cpu(struct task_struct *p, int parent)
 
 static int select_idle_cpu(struct task_struct *p, int parent, int prev_cpu)
 {
-	int cpu;
+	int cpu, i;
 
 	if (libbpf_available_idle_cpu(prev_cpu))
 		return prev_cpu;
 
 	if (libbpf_available_idle_cpu(parent))
-		return prev_cpu;
+		return parent;
 
-	libbpf_for_each_cpu_wrap(cpu, (void *)getVal(p->cpus_ptr), prev_cpu) {
+	cpu = libbpf_cpumask_next_wrap(prev_cpu - 1,
+				       (void *)getVal(p->cpus_ptr),
+				       prev_cpu, false);
+	for (i = 0; i < BPF_SCHED_LOOP_MAX; i++) {
+		if (cpu >= libbpf_nr_cpumask_bits())
+			break;
+
 		if (libbpf_available_idle_cpu(cpu))
 			return cpu;
+
+		cpu = libbpf_cpumask_next_wrap(cpu, (void *)getVal(p->cpus_ptr),
+					       prev_cpu, true);
 	}
 
 	return prev_cpu;
......
@@ -4613,7 +4613,7 @@ union bpf_attr {
  *	Return
  *		0 on success, or a negative error in case of failure.
  *
- * int bpf_get_cpumask_info(struct bpf_cpumask_info *cpus, int len)
+ * int bpf_get_cpumask_info(struct bpf_map *map, struct bpf_cpumask_info *cpus)
  *	Description
  *		Get system cpus returned in *cpus*.
  *	Return
......
@@ -21,6 +21,8 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
+/* set bigger value may lead verifier failed */
+#define BPF_SCHED_LOOP_MAX 1024
 #define INVALID_PTR ((void *)(0UL))
 #define getVal(P) \
 ({ \
@@ -69,6 +71,13 @@ static __always_inline int libbpf_nr_cpumask_bits(void);
 #endif
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__type(key, u32);
+	__type(value, struct bpf_cpumask_info);
+	__uint(max_entries, 1);
+} map_cpumask_info SEC(".maps");
+
 static __always_inline long libbpf_cpumask_copy(struct cpumask *dst,
 						struct cpumask *src)
 {
@@ -228,58 +237,93 @@ static __always_inline long libbpf_cpumask_cpulist_parse(char *src1,
 static __always_inline int libbpf_num_active_cpus(void)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	return getVal(cpus.nums_active_cpus);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return -1;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	return getVal(cpus->nums_active_cpus);
 }
 
 static __always_inline int libbpf_num_possible_cpus(void)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	return getVal(cpus.nums_possible_cpus);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return -1;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	return getVal(cpus->nums_possible_cpus);
 }
 
 static __always_inline void libbpf_possible_cpus_mask(struct cpumask *mask)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	libbpf_cpumask_copy(mask, &cpus.cpu_possible_cpumask);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	libbpf_cpumask_copy(mask, &cpus->cpu_possible_cpumask);
 }
 
 static __always_inline void libbpf_active_cpus_mask(struct cpumask *mask)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	libbpf_cpumask_copy(mask, &cpus.cpu_active_cpumask);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	libbpf_cpumask_copy(mask, &cpus->cpu_active_cpumask);
 }
 
 static __always_inline void libbpf_isolate_cpus_mask(struct cpumask *mask)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	libbpf_cpumask_copy(mask, &cpus.cpu_isolate_cpumask);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	libbpf_cpumask_copy(mask, &cpus->cpu_isolate_cpumask);
 }
 
 static __always_inline int libbpf_nr_cpus_ids(void)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	return getVal(cpus.nr_cpu_ids);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return -1;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	return getVal(cpus->nr_cpu_ids);
 }
 
 static __always_inline int libbpf_nr_cpumask_bits(void)
 {
-	struct bpf_cpumask_info cpus;
+	struct bpf_cpumask_info *cpus;
+	int key = 0;
 
-	bpf_get_cpumask_info(&cpus, sizeof(cpus));
-	return getVal(cpus.bpf_nr_cpumask_bits);
+	cpus = bpf_map_lookup_elem(&map_cpumask_info, &key);
+	if (!cpus)
+		return -1;
+
+	bpf_get_cpumask_info(&map_cpumask_info, cpus);
+	return getVal(cpus->bpf_nr_cpumask_bits);
 }
 
 static __always_inline unsigned long libbpf_cfs_load_avg_of(int cpu)
......