Commit 5ab551d6 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: group scheduling corner case fix, two deadline scheduler
  fixes, effective_load() overflow fix, nested sleep fix, 6144 CPUs
  system fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix RCU stall upon -ENOMEM in sched_create_group()
  sched/deadline: Avoid double-accounting in case of missed deadlines
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  sched: Fix odd values in effective_load() calculations
  sched, fanotify: Deal with nested sleeps
  sched: Fix KMALLOC_MAX_SIZE overflow during cpumask allocation
fs/notify/fanotify/fanotify_user.c

@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
         struct fsnotify_event *kevent;
         char __user *start;
         int ret;
-        DEFINE_WAIT(wait);
+        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
         start = buf;
         group = file->private_data;
 
         pr_debug("%s: group=%p\n", __func__, group);
 
+        add_wait_queue(&group->notification_waitq, &wait);
         while (1) {
-                prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
                 mutex_lock(&group->notification_mutex);
                 kevent = get_one_event(group, count);
                 mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
                         if (start != buf)
                                 break;
-                        schedule();
+
+                        wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                         continue;
                 }
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
                 buf += ret;
                 count -= ret;
         }
+        remove_wait_queue(&group->notification_waitq, &wait);
 
-        finish_wait(&group->notification_waitq, &wait);
         if (start != buf && ret != -EFAULT)
                 ret = buf - start;
         return ret;
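
The fanotify hunks above are one instance of the generic wait_woken() pattern used by the nested-sleep fix: the task only leaves TASK_RUNNING inside wait_woken() itself, so it can take mutexes (or otherwise block) while checking its wakeup condition, which is what the old prepare_to_wait()/schedule() loop could not do without triggering the "do not call blocking ops when !TASK_RUNNING" warning. A minimal sketch of the idiom follows; my_condition_ready() and the wait-queue argument are hypothetical placeholders, not part of the patch.

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Hedged sketch of a wait_woken() loop; my_condition_ready() is assumed
     * to exist and may itself sleep (e.g. take a mutex), because the task is
     * TASK_RUNNING whenever it is called. */
    static long wait_for_condition(wait_queue_head_t *waitq)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            long ret = 0;

            add_wait_queue(waitq, &wait);
            while (!my_condition_ready()) {
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
                    /* sleeps in TASK_INTERRUPTIBLE; returns immediately if a
                     * wakeup already marked this wait entry as woken */
                    wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
            }
            remove_wait_queue(waitq, &wait);

            return ret;
    }
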
kernel/sched/core.c

@@ -7112,9 +7112,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
         alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-        alloc_size += num_possible_cpus() * cpumask_size();
 #endif
         if (alloc_size) {
                 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
                 ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+        }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-                for_each_possible_cpu(i) {
-                        per_cpu(load_balance_mask, i) = (void *)ptr;
-                        ptr += cpumask_size();
-                }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
-        }
+        for_each_possible_cpu(i) {
+                per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                        cpumask_size(), GFP_KERNEL, cpu_to_node(i));
+        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
         init_rt_bandwidth(&def_rt_bandwidth,
                         global_rt_period(), global_rt_runtime());
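
Rough numbers behind the kernel/sched/core.c change, assuming the 6144-CPU machine from the pull request above, NR_CPUS = 6144, and the common x86_64 defaults of 4 KiB pages with MAX_ORDER = 11 (so KMALLOC_MAX_SIZE = 4 MiB): each off-stack cpumask is 6144 / 8 = 768 bytes, so the old single bootstrap kzalloc() needed an extra 6144 * 768 = 4,718,592 bytes (about 4.5 MiB) on top of the group-scheduling arrays, more than kmalloc() can hand out at all. Allocating each load_balance_mask separately with kzalloc_node() keeps every request at 768 bytes and also places it on the owning CPU's NUMA node.
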
kernel/sched/deadline.c

@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-        int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-        int rorun = dl_se->runtime <= 0;
-
-        if (!rorun && !dmiss)
-                return 0;
-
-        /*
-         * If we are beyond our current deadline and we are still
-         * executing, then we have already used some of the runtime of
-         * the next instance. Thus, if we do not account that, we are
-         * stealing bandwidth from the system at each deadline miss!
-         */
-        if (dmiss) {
-                dl_se->runtime = rorun ? dl_se->runtime : 0;
-                dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-        }
-
-        return 1;
+        return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
          * parameters of the task might need updating. Otherwise,
          * we want a replenishment of its runtime.
          */
-        if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-                replenish_dl_entity(dl_se, pi_se);
-        else
+        if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
                 update_dl_entity(dl_se, pi_se);
+        else if (flags & ENQUEUE_REPLENISH)
+                replenish_dl_entity(dl_se, pi_se);
 
         __enqueue_dl_entity(dl_se);
 }
kernel/sched/fair.c

@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+        /* init_cfs_bandwidth() was not called */
+        if (!cfs_b->throttled_cfs_rq.next)
+                return;
+
         hrtimer_cancel(&cfs_b->period_timer);
         hrtimer_cancel(&cfs_b->slack_timer);
 }
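
The new check works because a cfs_bandwidth embedded in a task_group that came from a zeroed allocation has throttled_cfs_rq.next == NULL until init_cfs_bandwidth() runs INIT_LIST_HEAD(), which makes the list head point at itself; a NULL ->next therefore reliably means "never initialized" rather than "initialized but empty", so a half-constructed group can be torn down without cancelling timers that were never set up. A generic, hedged sketch of that idiom is below; struct widget and its helpers are hypothetical.

    #include <linux/hrtimer.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct widget {
            struct list_head queue;
            struct hrtimer timer;
    };

    static struct widget *widget_alloc(void)
    {
            /* kzalloc() leaves queue.next == NULL until widget_setup() runs */
            return kzalloc(sizeof(struct widget), GFP_KERNEL);
    }

    static void widget_setup(struct widget *w)
    {
            INIT_LIST_HEAD(&w->queue);      /* queue.next now points at &w->queue */
            hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    }

    static void widget_destroy(struct widget *w)
    {
            if (!w->queue.next)             /* widget_setup() never ran */
                    return;                 /* nothing to cancel */
            hrtimer_cancel(&w->timer);
    }
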
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                  * wl = S * s'_i; see (2)
                  */
                 if (W > 0 && w < W)
-                        wl = (w * tg->shares) / W;
+                        wl = (w * (long)tg->shares) / W;
                 else
                         wl = tg->shares;
 
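
The one-character cast in the last hunk matters because task_group->shares is an unsigned long while the weight delta w can be negative: under C's usual arithmetic conversions the whole expression is then evaluated as unsigned, so a negative w wraps around and effective_load() returns an absurdly large value. The standalone program below (ordinary userspace C, not kernel code, with made-up numbers) reproduces the effect:

    #include <stdio.h>

    int main(void)
    {
            long w = -1024;              /* the weight delta may be negative */
            unsigned long shares = 2048; /* task_group->shares is unsigned long */
            long W = 4096;

            /* w is converted to unsigned long, so the product wraps */
            long wrong = (w * shares) / W;
            /* the cast keeps the whole expression in signed arithmetic */
            long right = (w * (long)shares) / W;

            printf("without cast: %ld\n", wrong); /* large bogus value */
            printf("with cast:    %ld\n", right); /* -512, as intended */
            return 0;
    }
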