Commit cd8ba7cd authored by Mike Travis, committed by Ingo Molnar

sched: add new set_cpus_allowed_ptr function

Add a new function that accepts a pointer to the "newly allowed cpus"
cpumask argument.

int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)

The current set_cpus_allowed() function is modified to use the new
function, but this does not result in an ABI change.  And with some
help from compiler optimization, it may not introduce any additional
overhead.
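
For illustration only (not part of this commit's diff), here is how a
call site changes: the whole cpumask_t is no longer copied by value,
only its address is passed.  The sketch assumes a valid struct
task_struct *p and the cpumask_of_cpu() helper of this kernel era:

	/* sketch of a call site; p is assumed to be a valid task */
	cpumask_t new_mask = cpumask_of_cpu(1);	/* allow only CPU 1 */

	set_cpus_allowed(p, new_mask);		/* old: copies the whole mask */
	set_cpus_allowed_ptr(p, &new_mask);	/* new: passes only a pointer */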

Additionally, to enforce the read-only nature of the new_mask
argument, the "const" property is migrated to the sub-functions
called by set_cpus_allowed().  This silences compiler warnings.
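
As a self-contained illustration of this pattern (a userspace sketch,
not kernel code; mask_t, do_set_mask and set_mask are hypothetical
stand-ins): the const qualifier lives on the pointer-taking worker,
while the value-taking compatibility wrapper forwards &m, which
converts implicitly to const mask_t * and can be inlined away:

	#include <stdio.h>

	/* stand-in for a large cpumask_t-like bitmap type */
	typedef struct { unsigned long bits[4]; } mask_t;

	/* the pointer-taking worker carries the "const" promise */
	static int do_set_mask(const mask_t *m)
	{
		return m->bits[0] ? 0 : -1;	/* fail on an empty first word */
	}

	/* value-taking compatibility wrapper: &m converts implicitly to
	 * const mask_t *, and an optimizing compiler can inline it away */
	static inline int set_mask(mask_t m)
	{
		return do_set_mask(&m);
	}

	int main(void)
	{
		mask_t m = { { 1UL, 0, 0, 0 } };
		printf("%d %d\n", set_mask(m), do_set_mask(&m));	/* prints: 0 0 */
		return 0;
	}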
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent e0982e90
@@ -889,7 +889,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);

 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -1502,15 +1503,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)

 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}

 extern unsigned long long sched_clock(void);
...
@@ -5486,7 +5486,7 @@ static inline void sched_init_granularity(void)
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
 {
 	struct migration_req req;
 	unsigned long flags;
@@ -5494,23 +5494,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	int ret = 0;

 	rq = task_rq_lock(p, &flags);
-	if (!cpus_intersects(new_mask, cpu_online_map)) {
+	if (!cpus_intersects(*new_mask, cpu_online_map)) {
 		ret = -EINVAL;
 		goto out;
 	}

 	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, &new_mask);
+		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		p->cpus_allowed = new_mask;
-		p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+		p->cpus_allowed = *new_mask;
+		p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
 	}

 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpu_isset(task_cpu(p), new_mask))
+	if (cpu_isset(task_cpu(p), *new_mask))
 		goto out;

-	if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+	if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
@@ -5523,7 +5523,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
...
@@ -1123,7 +1123,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }

-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+				const cpumask_t *new_mask)
 {
 	int weight = cpus_weight(*new_mask);
...