diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2c30aceed1e9981c33b2d7ee6f46c356adade42f..3e22147307e4bbe08b0d58e580de4e21d12f6e8a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8603,6 +8603,13 @@ static void sched_free_group(struct task_group *tg)
 	kmem_cache_free(task_group_cache, tg);
 }
 
+#ifdef CONFIG_BPF_SCHED
+static inline void tg_init_tag(struct task_group *tg, struct task_group *ptg)
+{
+	tg->tag = ptg->tag;
+}
+#endif
+
 /* allocate runqueue etc for a new task group */
 struct task_group *sched_create_group(struct task_group *parent)
 {
@@ -8623,6 +8630,10 @@ struct task_group *sched_create_group(struct task_group *parent)
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
+#ifdef CONFIG_BPF_SCHED
+	tg_init_tag(tg, parent);
+#endif
+
 	alloc_uclamp_sched_group(tg, parent);
 
 	return tg;
@@ -8694,6 +8705,14 @@ static void sched_change_group(struct task_struct *tsk, int type)
 	sched_change_qos_group(tsk, tg);
 #endif
 
+#ifdef CONFIG_BPF_SCHED
+	/*
+	 * The caller has already dequeued the task and will re-enqueue it,
+	 * so we do not need to dequeue and enqueue the task again here.
+	 */
+	tsk->tag = tg->tag;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
 		tsk->sched_class->task_change_group(tsk, type);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ad00ba5f6d82eeaaa3c567a146365dd90be4cf3d..49a10a4bd54c9d2088ad05d6fc3b07e809d5c518 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -454,7 +454,12 @@ struct task_group {
 	struct uclamp_se	uclamp[UCLAMP_CNT];
 #endif
 
+#ifdef CONFIG_BPF_SCHED
+	/* Use kABI reserve slot 1 to hold the group's tag */
+	KABI_USE(1, long tag)
+#else
 	KABI_RESERVE(1)
+#endif
 	KABI_RESERVE(2)
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
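
Taken together, the hunks give the tag a simple inheritance chain: a new task group copies its parent's tag in sched_create_group() via tg_init_tag(), and a task picks up its group's tag in sched_change_group() when it is attached or moved. The following is a minimal, user-space C sketch of that flow, not kernel code: the structs are reduced to the single field this patch touches, and task_attach_group() is a made-up helper name standing in for the sched_change_group() hunk.

/*
 * Stand-alone model of the tag propagation added by this patch.
 * Struct names mirror the kernel ones; everything else is simplified.
 */
#include <stdio.h>

struct task_group {
	long tag;		/* stored via KABI_USE(1, long tag) in the patch */
};

struct task_struct {
	long tag;		/* per-task copy of the owning group's tag */
};

/* Mirrors tg_init_tag(): a new group inherits its parent's tag. */
static void tg_init_tag(struct task_group *tg, struct task_group *ptg)
{
	tg->tag = ptg->tag;
}

/* Hypothetical helper mirroring the sched_change_group() hunk:
 * a task picks up the tag of the group it is attached to. */
static void task_attach_group(struct task_struct *tsk, struct task_group *tg)
{
	tsk->tag = tg->tag;
}

int main(void)
{
	struct task_group root = { .tag = 0 };
	struct task_group child;
	struct task_struct tsk = { .tag = 0 };

	root.tag = 42;			/* e.g. set through some group interface */
	tg_init_tag(&child, &root);	/* sched_create_group() path */
	task_attach_group(&tsk, &child);/* group attach / move path */

	printf("child group tag = %ld, task tag = %ld\n", child.tag, tsk.tag);
	return 0;
}

Two points about the shape of the change: placing the tag in a KABI_RESERVE slot keeps the size and layout of struct task_group unchanged for kABI compatibility when CONFIG_BPF_SCHED is enabled, and copying the tag into the task at group creation/change time means a scheduler hook can read tsk->tag directly instead of walking up the group hierarchy on every decision.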