diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cae35b6b9aecf9a849085a3c67a56da6c57d69cb..572c65bcc80f42e43ff6bc2f88c2bb02ab08dbfe 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -130,6 +130,7 @@ extern struct group_info init_groups;
 	.normal_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
+	.nr_cpus_allowed = NR_CPUS,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0846f1f9e196d35c1e9af6f841d77f6f5e13aa03..b07a2cf76401b2b3dc91364d08813e2a7ad1ad26 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -847,6 +847,7 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
 };
 
 struct load_weight {
@@ -956,6 +957,7 @@ struct task_struct {
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
+	int nr_cpus_allowed;
 	unsigned int time_slice;
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
diff --git a/kernel/fork.c b/kernel/fork.c
index 09c0b90a69ccd5a265958e01968f0bbf68af37b7..930c51865ab44a4b75e219acf13a8354ff1410a6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1242,6 +1242,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * parent's CPU). This avoids alot of nasty races.
 	 */
 	p->cpus_allowed = current->cpus_allowed;
+	p->nr_cpus_allowed = current->nr_cpus_allowed;
 	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
diff --git a/kernel/sched.c b/kernel/sched.c
index 357d3a084de84695e187391689d3625798deeaa1..66e99b419b31e7bf3fcd1ce5fc4b759b060ff709 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -343,6 +343,7 @@ struct rt_rq {
 	int rt_load_balance_idx;
 	struct list_head *rt_load_balance_head, *rt_load_balance_curr;
 	unsigned long rt_nr_running;
+	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
 };
@@ -5144,7 +5145,13 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 		goto out;
 	}
 
-	p->cpus_allowed = new_mask;
+	if (p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, &new_mask);
+	else {
+		p->cpus_allowed = new_mask;
+		p->nr_cpus_allowed = cpus_weight(new_mask);
+	}
+
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpu_isset(task_cpu(p), new_mask))
 		goto out;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c492fd2b2eece53eaad1965bb726aa7ad37dbf0f..ae4995c09aacd7645c464e705e0fa10ce2e62b66 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -33,6 +33,14 @@ static inline void rt_clear_overload(struct rq *rq)
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
 }
+
+static void update_rt_migration(struct rq *rq)
+{
+	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
+		rt_set_overload(rq);
+	else
+		rt_clear_overload(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -65,8 +73,10 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 #ifdef CONFIG_SMP
 	if (p->prio < rq->rt.highest_prio)
 		rq->rt.highest_prio = p->prio;
-	if (rq->rt.rt_nr_running > 1)
-		rt_set_overload(rq);
+	if (p->nr_cpus_allowed > 1)
+		rq->rt.rt_nr_migratory++;
+
+	update_rt_migration(rq);
 #endif /* CONFIG_SMP */
 }
 
@@ -88,8 +98,10 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
 		} /* otherwise leave rq->highest prio alone */
 	} else
 		rq->rt.highest_prio = MAX_RT_PRIO;
-	if (rq->rt.rt_nr_running < 2)
-		rt_clear_overload(rq);
+	if (p->nr_cpus_allowed > 1)
+		rq->rt.rt_nr_migratory--;
+
+	update_rt_migration(rq);
 #endif /* CONFIG_SMP */
 }
 
@@ -182,7 +194,8 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)))
+	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (p->nr_cpus_allowed > 1))
 		return 1;
 	return 0;
 }
@@ -584,6 +597,32 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	/* don't touch RT tasks */
 	return 0;
 }
+
+static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+{
+	int weight = cpus_weight(*new_mask);
+
+	BUG_ON(!rt_task(p));
+
+	/*
+	 * Update the migration status of the RQ if we have an RT task
+	 * which is running AND changing its weight value.
+	 */
+	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
+		struct rq *rq = task_rq(p);
+
+		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
+			rq->rt.rt_nr_migratory++;
+		else if((p->nr_cpus_allowed > 1) && (weight <= 1)) {
+			BUG_ON(!rq->rt.rt_nr_migratory);
+			rq->rt.rt_nr_migratory--;
+		}
+
+		update_rt_migration(rq);
+	}
+
+	p->cpus_allowed = *new_mask;
+	p->nr_cpus_allowed = weight;
+}
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)	do { } while (0)
 # define schedule_balance_rt(rq, prev)	do { } while (0)
@@ -637,6 +676,7 @@ const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
+	.set_cpus_allowed	= set_cpus_allowed_rt,
 #endif
 
 	.set_curr_task		= set_curr_task_rt,
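Note: the accounting the patch introduces is easiest to see in isolation. Below is a minimal user-space sketch (not kernel code) of that bookkeeping; struct rt_rq and the enqueue/dequeue helpers here are simplified stand-ins, and only the counting and overload logic mirrors update_rt_migration(), inc_rt_tasks() and dec_rt_tasks() in the diff above.

	/* Standalone model of the rt_nr_migratory bookkeeping (builds with any C compiler). */
	#include <assert.h>
	#include <stdio.h>

	struct rt_rq {
		unsigned long rt_nr_running;
		unsigned long rt_nr_migratory;
		int overloaded;		/* stands in for this CPU's bit in rt_overload_mask */
	};

	/* Mirrors update_rt_migration(): overload only if more than one RT task is
	 * queued AND at least one of them is allowed to run on another CPU. */
	static void update_rt_migration(struct rt_rq *rt)
	{
		if (rt->rt_nr_migratory && rt->rt_nr_running > 1)
			rt->overloaded = 1;
		else
			rt->overloaded = 0;
	}

	/* Mirrors inc_rt_tasks()/dec_rt_tasks(): tasks pinned to one CPU
	 * (nr_cpus_allowed <= 1) never count toward rt_nr_migratory. */
	static void enqueue(struct rt_rq *rt, int nr_cpus_allowed)
	{
		rt->rt_nr_running++;
		if (nr_cpus_allowed > 1)
			rt->rt_nr_migratory++;
		update_rt_migration(rt);
	}

	static void dequeue(struct rt_rq *rt, int nr_cpus_allowed)
	{
		assert(rt->rt_nr_running);
		rt->rt_nr_running--;
		if (nr_cpus_allowed > 1) {
			assert(rt->rt_nr_migratory);
			rt->rt_nr_migratory--;
		}
		update_rt_migration(rt);
	}

	int main(void)
	{
		struct rt_rq rt = { 0, 0, 0 };

		enqueue(&rt, 1);	/* pinned RT task */
		enqueue(&rt, 1);	/* second pinned RT task */
		printf("two pinned tasks -> overloaded=%d\n", rt.overloaded);	/* 0 */

		enqueue(&rt, 4);	/* one migratable task joins */
		printf("plus one movable -> overloaded=%d\n", rt.overloaded);	/* 1 */

		dequeue(&rt, 4);
		printf("movable dequeued -> overloaded=%d\n", rt.overloaded);	/* 0 */
		return 0;
	}

The design point is that a runqueue full of RT tasks that are all pinned to that CPU is not usefully "overloaded": nothing on it can be pushed or pulled elsewhere, so setting its bit in rt_overload_mask would only trigger pointless pull attempts from other CPUs. Caching cpus_weight() in p->nr_cpus_allowed lets the enqueue/dequeue hot paths make that distinction without recomputing the mask weight each time.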