Commit 95e02ca9 authored by Thomas Gleixner, committed by Linus Torvalds

[PATCH] rtmutex: Propagate priority settings into PI lock chains

When the priority of a task which is blocked on a lock changes, we must
propagate this change into the PI lock chain.  Therefore the chain walk code
is changed to get rid of the references to current, which avoids false
positives in the deadlock detector: setscheduler might be called by a task
which holds the lock on which the task whose priority is being changed is
blocked.

Also add some comments about the get/put_task_struct usage to avoid
confusion.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 0bafd214
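For illustration only (not part of the patch): a minimal user-space sketch of the scenario this change addresses, assuming a kernel with PI-futex support, a glibc that provides PTHREAD_PRIO_INHERIT, and permission to use SCHED_FIFO. The thread names and the priority value 50 are made up. A low-priority thread owns a PI mutex, a second thread blocks on it, and a third party then raises the blocked waiter's priority with pthread_setschedparam(); with this patch, sched_setscheduler() calls rt_mutex_adjust_pi() and the new priority propagates down the PI chain to the lock owner.

/* pi_boost_demo.c - hypothetical example, not part of this patch.
 * Build: gcc -o pi_boost_demo pi_boost_demo.c -lpthread
 * Run as root (SCHED_FIFO needs privileges).
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pi_lock;

/* "Owner": takes the PI mutex first and holds it for a while. */
static void *owner_fn(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&pi_lock);
        sleep(2);               /* others block on pi_lock meanwhile */
        pthread_mutex_unlock(&pi_lock);
        return NULL;
}

/* "Waiter": blocks on the PI mutex held by the owner. */
static void *waiter_fn(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&pi_lock);
        pthread_mutex_unlock(&pi_lock);
        return NULL;
}

int main(void)
{
        pthread_mutexattr_t attr;
        pthread_t owner, waiter;
        struct sched_param sp = { .sched_priority = 50 };
        int err;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&pi_lock, &attr);

        pthread_create(&owner, NULL, owner_fn, NULL);
        sleep(1);               /* let the owner acquire pi_lock   */
        pthread_create(&waiter, NULL, waiter_fn, NULL);
        sleep(1);               /* let the waiter block on pi_lock */

        /*
         * A third party raises the priority of the already-blocked waiter.
         * With this patch the kernel re-walks the PI chain, so the boost
         * reaches the thread that is still holding pi_lock.
         */
        err = pthread_setschedparam(waiter, SCHED_FIFO, &sp);
        if (err)
                printf("pthread_setschedparam failed: %d (need root?)\n", err);

        pthread_join(waiter, NULL);
        pthread_join(owner, NULL);
        return 0;
}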
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1044,11 +1044,13 @@ extern void sched_idle_next(void);
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(task_t *p);
 extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
 #else
 static inline int rt_mutex_getprio(task_t *p)
 {
         return p->normal_prio;
 }
+# define rt_mutex_adjust_pi(p)   do { } while (0)
 #endif
 
 extern void set_user_nice(task_t *p, long nice);
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -160,7 +160,8 @@ int max_lock_depth = 1024;
 static int rt_mutex_adjust_prio_chain(task_t *task,
                                       int deadlock_detect,
                                       struct rt_mutex *orig_lock,
-                                      struct rt_mutex_waiter *orig_waiter
+                                      struct rt_mutex_waiter *orig_waiter,
+                                      struct task_struct *top_task
                                       __IP_DECL__)
 {
         struct rt_mutex *lock;
@@ -189,7 +190,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
                         prev_max = max_lock_depth;
                         printk(KERN_WARNING "Maximum lock depth %d reached "
                                "task: %s (%d)\n", max_lock_depth,
-                               current->comm, current->pid);
+                               top_task->comm, top_task->pid);
                 }
                 put_task_struct(task);
 
@@ -229,7 +230,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
         }
 
         /* Deadlock detection */
-        if (lock == orig_lock || rt_mutex_owner(lock) == current) {
+        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                 spin_unlock(&lock->wait_lock);
                 ret = deadlock_detect ? -EDEADLK : 0;
@@ -433,6 +434,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 __rt_mutex_adjust_prio(owner);
                 if (owner->pi_blocked_on) {
                         boost = 1;
+                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                         get_task_struct(owner);
                 }
                 spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -441,6 +443,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 spin_lock_irqsave(&owner->pi_lock, flags);
                 if (owner->pi_blocked_on) {
                         boost = 1;
+                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                         get_task_struct(owner);
                 }
                 spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -450,8 +453,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
         spin_unlock(&lock->wait_lock);
 
-        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
-                                         waiter __IP__);
+        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+                                         current __IP__);
 
         spin_lock(&lock->wait_lock);
 
@@ -552,6 +555,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
                 if (owner->pi_blocked_on) {
                         boost = 1;
+                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                         get_task_struct(owner);
                 }
                 spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -564,11 +568,36 @@ static void remove_waiter(struct rt_mutex *lock,
 
         spin_unlock(&lock->wait_lock);
 
-        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
+        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
 
         spin_lock(&lock->wait_lock);
 }
 
+/*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+        struct rt_mutex_waiter *waiter;
+        unsigned long flags;
+
+        spin_lock_irqsave(&task->pi_lock, flags);
+
+        waiter = task->pi_blocked_on;
+        if (!waiter || waiter->list_entry.prio == task->prio) {
+                spin_unlock_irqrestore(&task->pi_lock, flags);
+                return;
+        }
+
+        /* gets dropped in rt_mutex_adjust_prio_chain()! */
+        get_task_struct(task);
+        spin_unlock_irqrestore(&task->pi_lock, flags);
+
+        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
 /*
  * Slow path lock function:
  */
@@ -636,6 +665,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                         if (unlikely(ret))
                                 break;
                 }
+
                 spin_unlock(&lock->wait_lock);
 
                 debug_rt_mutex_print_deadlock(&waiter);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4070,6 +4070,8 @@ int sched_setscheduler(struct task_struct *p, int policy,
         __task_rq_unlock(rq);
         spin_unlock_irqrestore(&p->pi_lock, flags);
 
+        rt_mutex_adjust_pi(p);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);