1. 16 Nov, 2014 (2 commits)
    • sched/cputime: Fix clock_nanosleep()/clock_gettime() inconsistency · 6e998916
      Stanislaw Gruszka authored
      Commit d670ec13 ("posix-cpu-timers: Cure SMP wobbles") fixes one glibc
      test case at the cost of breaking another one. After that commit, calling
      clock_nanosleep(TIMER_ABSTIME, X) and then clock_gettime(&Y) can result
      in time Y being smaller than time X.
      
      A reproducer/tester can be found further below; it can be compiled and run with:
      
      	gcc -o tst-cpuclock2 tst-cpuclock2.c -pthread
      	while ./tst-cpuclock2 ; do : ; done
      
      This reproducer, when run on a buggy kernel, will complain
      about "clock_gettime difference too small".
      
      The issue happens because, at start, thread_group_cputimer() initializes
      the cputimer's sum_exec_runtime with thread runtime that has not yet been
      accounted, and then the scheduler tick adds the threads' runtime to the
      running cputimer again, making its sum_exec_runtime bigger than the
      threads' actual runtime.
      
      KOSAKI Motohiro posted a fix for this problem, but that patch was never
      applied: https://lkml.org/lkml/2013/5/26/191 .
      
      This patch takes a different approach to cure the problem. It calls
      update_curr() when the cputimer starts, which ensures we have up-to-date
      stats for the running threads, so on the next scheduler tick we account
      only the runtime that has elapsed since the cputimer started. It also
      ensures a consistent state between the cpu times of the individual
      threads and the cpu time of the process made up of those threads.
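
      A minimal sketch of the idea (the names follow the mainline scheduler
      code, but treat this as an illustration of the approach rather than
      the literal patch):

      	u64 task_sched_runtime(struct task_struct *p)
      	{
      		unsigned long flags;
      		struct rq *rq;
      		u64 ns;

      		rq = task_rq_lock(p, &flags);
      		/*
      		 * If p is running right now, fold the time it has run
      		 * since the last tick into se.sum_exec_runtime before
      		 * sampling it, via the class's update_curr() hook.
      		 */
      		if (task_current(rq, p) && p->on_rq) {
      			update_rq_clock(rq);
      			p->sched_class->update_curr(rq);
      		}
      		ns = p->se.sum_exec_runtime;
      		task_rq_unlock(rq, p, &flags);

      		return ns;
      	}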
      
      Full reproducer (tst-cpuclock2.c):
      
      	#define _GNU_SOURCE
      	#include <unistd.h>
      	#include <sys/syscall.h>
      	#include <stdio.h>
      	#include <time.h>
      	#include <pthread.h>
      	#include <stdint.h>
      	#include <inttypes.h>
      
      	/* Parameters for the Linux kernel ABI for CPU clocks.  */
      	#define CPUCLOCK_SCHED          2
      	#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
      		((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
      
      	static pthread_barrier_t barrier;
      
      	/* Help advance the clock.  */
      	static void *chew_cpu(void *arg)
      	{
      		pthread_barrier_wait(&barrier);
      		while (1) ;
      
      		return NULL;
      	}
      
      	/* Don't use the glibc wrapper.  */
      	static int do_nanosleep(int flags, const struct timespec *req)
      	{
      		clockid_t clock_id = MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED);
      
      		return syscall(SYS_clock_nanosleep, clock_id, flags, req, NULL);
      	}
      
      	static int64_t tsdiff(const struct timespec *before, const struct timespec *after)
      	{
      		int64_t before_i = before->tv_sec * 1000000000ULL + before->tv_nsec;
      		int64_t after_i = after->tv_sec * 1000000000ULL + after->tv_nsec;
      
      		return after_i - before_i;
      	}
      
      	int main(void)
      	{
      		int result = 0;
      		pthread_t th;
      
      		pthread_barrier_init(&barrier, NULL, 2);
      
      		if (pthread_create(&th, NULL, chew_cpu, NULL) != 0) {
      			perror("pthread_create");
      			return 1;
      		}
      
      		pthread_barrier_wait(&barrier);
      
      		/* The test.  */
      		struct timespec before, after, sleeptimeabs;
      		int64_t sleepdiff, diffabs;
      		const struct timespec sleeptime = { .tv_sec = 0, .tv_nsec = 100000000 };
      
      		/* The relative nanosleep.  Not sure why this is needed, but its presence
      		   seems to make it easier to reproduce the problem.  */
      		if (do_nanosleep(0, &sleeptime) != 0) {
      			perror("clock_nanosleep");
      			return 1;
      		}
      
      		/* Get the current time.  */
      		if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &before) < 0) {
      			perror("clock_gettime[2]");
      			return 1;
      		}
      
      		/* Compute the absolute sleep time based on the current time.  */
      		uint64_t nsec = before.tv_nsec + sleeptime.tv_nsec;
      		sleeptimeabs.tv_sec = before.tv_sec + nsec / 1000000000;
      		sleeptimeabs.tv_nsec = nsec % 1000000000;
      
      		/* Sleep for the computed time.  */
      		if (do_nanosleep(TIMER_ABSTIME, &sleeptimeabs) != 0) {
      			perror("absolute clock_nanosleep");
      			return 1;
      		}
      
      		/* Get the time after the sleep.  */
      		if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &after) < 0) {
      			perror("clock_gettime[3]");
      			return 1;
      		}
      
      		/* The time after sleep should always be equal to or after the absolute sleep
      		   time passed to clock_nanosleep.  */
      		sleepdiff = tsdiff(&sleeptimeabs, &after);
      		if (sleepdiff < 0) {
      			printf("absolute clock_nanosleep woke too early: %" PRId64 "\n", sleepdiff);
      			result = 1;
      
      			/* Cast explicitly: tv_sec/tv_nsec are not unsigned long long on all ABIs. */
      			printf("Before %llu.%09llu\n", (unsigned long long)before.tv_sec, (unsigned long long)before.tv_nsec);
      			printf("After  %llu.%09llu\n", (unsigned long long)after.tv_sec, (unsigned long long)after.tv_nsec);
      			printf("Sleep  %llu.%09llu\n", (unsigned long long)sleeptimeabs.tv_sec, (unsigned long long)sleeptimeabs.tv_nsec);
      		}
      
      		/* The difference between the timestamps taken before and after the
      		   clock_nanosleep call should be equal to or more than the duration of the
      		   sleep.  */
      		diffabs = tsdiff(&before, &after);
      		if (diffabs < sleeptime.tv_nsec) {
      			printf("clock_gettime difference too small: %" PRId64 "\n", diffabs);
      			result = 1;
      		}
      
      		pthread_cancel(th);
      
      		return result;
      	}
      Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
      Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
      Cc: Rik van Riel <riel@redhat.com>
      Cc: Frederic Weisbecker <fweisbec@gmail.com>
      Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Oleg Nesterov <oleg@redhat.com>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/20141112155843.GA24803@redhat.com
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      6e998916
    • sched/cputime: Fix cpu_timer_sample_group() double accounting · 23cfa361
      Peter Zijlstra authored
      While looking over the cpu-timer code I found that we appear to add
      the delta for the calling task twice, through:
      
        cpu_timer_sample_group()
          thread_group_cputimer()
            thread_group_cputime()
              times->sum_exec_runtime += task_sched_runtime();
      
          *sample = cputime.sum_exec_runtime + task_delta_exec();
      
      Which would make the sample run ahead, making the sleep short.
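
      With thread_group_cputimer() already folding the caller's pending delta
      into sum_exec_runtime, the CPUCLOCK_SCHED case of cpu_timer_sample_group()
      would become something like the sketch below (illustrative, not the
      verbatim patch):

      	case CPUCLOCK_SCHED:
      		/*
      		 * The delta for the calling task is already included
      		 * once via thread_group_cputime(), so do not add
      		 * task_delta_exec() on top of it again.
      		 */
      		*sample = cputime.sum_exec_runtime;
      		break;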
      Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
      Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Oleg Nesterov <oleg@redhat.com>
      Cc: Stanislaw Gruszka <sgruszka@redhat.com>
      Cc: Christoph Lameter <cl@linux.com>
      Cc: Frederic Weisbecker <fweisbec@gmail.com>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Rik van Riel <riel@redhat.com>
      Cc: Tejun Heo <tj@kernel.org>
      Link: http://lkml.kernel.org/r/20141112113737.GI10476@twins.programming.kicks-ass.net
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      23cfa361
  2. 10 Nov, 2014 (1 commit)
    • sched/numa: Fix out of bounds read in sched_init_numa() · c123588b
      Andrey Ryabinin authored
      On the latest mm + KASan patchset I've got this:
      
          ==================================================================
          BUG: AddressSanitizer: out of bounds access in sched_init_smp+0x3ba/0x62c at addr ffff88006d4bee6c
          =============================================================================
          BUG kmalloc-8 (Not tainted): kasan error
          -----------------------------------------------------------------------------
      
          Disabling lock debugging due to kernel taint
          INFO: Allocated in alloc_vfsmnt+0xb0/0x2c0 age=75 cpu=0 pid=0
           __slab_alloc+0x4b4/0x4f0
           __kmalloc_track_caller+0x15f/0x1e0
           kstrdup+0x44/0x90
           alloc_vfsmnt+0xb0/0x2c0
           vfs_kern_mount+0x35/0x190
           kern_mount_data+0x25/0x50
           pid_ns_prepare_proc+0x19/0x50
           alloc_pid+0x5e2/0x630
           copy_process.part.41+0xdf5/0x2aa0
           do_fork+0xf5/0x460
           kernel_thread+0x21/0x30
           rest_init+0x1e/0x90
           start_kernel+0x522/0x531
           x86_64_start_reservations+0x2a/0x2c
           x86_64_start_kernel+0x15b/0x16a
          INFO: Slab 0xffffea0001b52f80 objects=24 used=22 fp=0xffff88006d4befc0 flags=0x100000000004080
          INFO: Object 0xffff88006d4bed20 @offset=3360 fp=0xffff88006d4bee70
      
          Bytes b4 ffff88006d4bed10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a  ........ZZZZZZZZ
          Object ffff88006d4bed20: 70 72 6f 63 00 6b 6b a5                          proc.kk.
          Redzone ffff88006d4bed28: cc cc cc cc cc cc cc cc                          ........
          Padding ffff88006d4bee68: 5a 5a 5a 5a 5a 5a 5a 5a                          ZZZZZZZZ
          CPU: 0 PID: 1 Comm: swapper/0 Tainted: G    B          3.18.0-rc3-mm1+ #108
          Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
           ffff88006d4be000 0000000000000000 ffff88006d4bed20 ffff88006c86fd18
           ffffffff81cd0a59 0000000000000058 ffff88006d404240 ffff88006c86fd48
           ffffffff811fa3a8 ffff88006d404240 ffffea0001b52f80 ffff88006d4bed20
          Call Trace:
          dump_stack (lib/dump_stack.c:52)
          print_trailer (mm/slub.c:645)
          object_err (mm/slub.c:652)
          ? sched_init_smp (kernel/sched/core.c:6552 kernel/sched/core.c:7063)
          kasan_report_error (mm/kasan/report.c:102 mm/kasan/report.c:178)
          ? kasan_poison_shadow (mm/kasan/kasan.c:48)
          ? kasan_unpoison_shadow (mm/kasan/kasan.c:54)
          ? kasan_poison_shadow (mm/kasan/kasan.c:48)
          ? kasan_kmalloc (mm/kasan/kasan.c:311)
          __asan_load4 (mm/kasan/kasan.c:371)
          ? sched_init_smp (kernel/sched/core.c:6552 kernel/sched/core.c:7063)
          sched_init_smp (kernel/sched/core.c:6552 kernel/sched/core.c:7063)
          kernel_init_freeable (init/main.c:869 init/main.c:997)
          ? finish_task_switch (kernel/sched/sched.h:1036 kernel/sched/core.c:2248)
          ? rest_init (init/main.c:924)
          kernel_init (init/main.c:929)
          ? rest_init (init/main.c:924)
          ret_from_fork (arch/x86/kernel/entry_64.S:348)
          ? rest_init (init/main.c:924)
          Read of size 4 by task swapper/0:
          Memory state around the buggy address:
           ffff88006d4beb80: fc fc fc fc fc fc fc fc fc fc 00 fc fc fc fc fc
           ffff88006d4bec00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
           ffff88006d4bec80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
           ffff88006d4bed00: fc fc fc fc 00 fc fc fc fc fc fc fc fc fc fc fc
           ffff88006d4bed80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
          >ffff88006d4bee00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc 04 fc
                                                                    ^
           ffff88006d4bee80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
           ffff88006d4bef00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
           ffff88006d4bef80: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
           ffff88006d4bf000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
           ffff88006d4bf080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
          ==================================================================
      
      A zero 'level' (e.g. on a non-NUMA system) causes an out-of-bounds
      access on this line:
      
           sched_max_numa_distance = sched_domains_numa_distance[level - 1];
      
      Fix this by exiting from sched_init_numa() earlier.
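
      One plausible shape of that early exit, assuming 'level' counts the
      distinct NUMA distances found (illustrative):

      	/*
      	 * On a non-NUMA system no distances were recorded, so
      	 * level == 0 and sched_domains_numa_distance[level - 1]
      	 * would read out of bounds. Bail out before that.
      	 */
      	if (!level)
      		return;

      	sched_max_numa_distance = sched_domains_numa_distance[level - 1];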
      Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
      Reviewed-by: Rik van Riel <riel@redhat.com>
      Fixes: 9942f79b ("sched/numa: Export info needed for NUMA balancing on complex topologies")
      Cc: peterz@infradead.org
      Link: http://lkml.kernel.org/r/1415372020-1871-1-git-send-email-a.ryabinin@samsung.com
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      c123588b
  3. 04 Nov, 2014 (1 commit)
  4. 28 Oct, 2014 (2 commits)
    • sched: stop the unbound recursion in preempt_schedule_context() · 009f60e2
      Oleg Nesterov authored
      preempt_schedule_context() does preempt_enable_notrace() at the end
      and this can call the same function again; exception_exit() is heavy
      and it is quite possible that need-resched is true again.
      
      1. Change this code to decrement preempt_count() and check
         need_resched() by hand.
      
      2. As Linus suggested, we can use the PREEMPT_ACTIVE bit and avoid
         the enable/disable dance around __schedule(), but in this case
         we need to move it into sched/core.c (see the sketch after this
         list).
      
      3. Cosmetic, but x86 forgets to declare this function. This doesn't
         really matter because it is only called by asm helpers; still, it
         makes sense to add the declaration to asm/preempt.h to match
         preempt_schedule().
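
      A sketch of the resulting function, combining points 1 and 2 above
      (illustrative):

      	asmlinkage __visible void __sched notrace preempt_schedule_context(void)
      	{
      		enum ctx_state prev_ctx;

      		if (likely(!preemptible()))
      			return;

      		do {
      			/*
      			 * PREEMPT_ACTIVE stops __schedule() from
      			 * preempting us recursively and avoids the
      			 * enable/disable dance (point 2).
      			 */
      			__preempt_count_add(PREEMPT_ACTIVE);
      			prev_ctx = exception_enter();
      			__schedule();
      			exception_exit(prev_ctx);
      			__preempt_count_sub(PREEMPT_ACTIVE);

      			/*
      			 * Re-check need_resched() by hand instead of
      			 * calling preempt_enable_notrace() (point 1).
      			 */
      			barrier();
      		} while (need_resched());
      	}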
      Reported-by: Sasha Levin <sasha.levin@oracle.com>
      Signed-off-by: Oleg Nesterov <oleg@redhat.com>
      Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
      Cc: Alexander Graf <agraf@suse.de>
      Cc: Andrew Morton <akpm@linux-foundation.org>
      Cc: Christoph Lameter <cl@linux.com>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
      Cc: Steven Rostedt <rostedt@goodmis.org>
      Cc: Peter Anvin <hpa@zytor.com>
      Cc: Andy Lutomirski <luto@amacapital.net>
      Cc: Denys Vlasenko <dvlasenk@redhat.com>
      Cc: Chuck Ebbert <cebbert.lkml@gmail.com>
      Cc: Frederic Weisbecker <fweisbec@gmail.com>
      Link: http://lkml.kernel.org/r/20141005202322.GB27962@redhat.com
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      009f60e2
    • sched: Fix race between task_group and sched_task_group · eeb61e53
      Kirill Tkhai authored
      The race may happen when somebody changes the task_group of a forking
      task. The child's cgroup is the same as the parent's after
      dup_task_struct() (it is just a memory copy), and its cfs_rq and rt_rq
      are likewise the same as the parent's.

      But if the parent changes its task_group before cgroup_post_fork() is
      called, this is not reflected in the child: the child's cfs_rq and
      rt_rq remain the same, while its task_group changes in
      cgroup_post_fork().
      
      To fix this we introduce a fork() method, which calls sched_move_task()
      directly. This function sets sched_task_group appropriately; its logic
      also has no problem with freshly created tasks, so we don't need
      anything special and can simply use it (see the sketch below).
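
      A sketch of that fork() method, assuming it is wired up as the cpu
      cgroup subsystem's fork callback (illustrative):

      	static void cpu_cgroup_fork(struct task_struct *task)
      	{
      		/*
      		 * Pick up any task_group change the parent made before
      		 * cgroup_post_fork(); sched_move_task() copes fine with
      		 * freshly created tasks.
      		 */
      		sched_move_task(task);
      	}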
      
      Possibly, this resolves Burke Libbey's problem: https://lkml.org/lkml/2014/10/24/456
      Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
      Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/1414405105.19914.169.camel@tkhai
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      eeb61e53
  5. 03 Oct, 2014 (1 commit)
  6. 24 Sep, 2014 (7 commits)
  7. 21 Sep, 2014 (1 commit)
  8. 19 Sep, 2014 (4 commits)
  9. 09 Sep, 2014 (1 commit)
  10. 07 Sep, 2014 (1 commit)
    • sched/deadline: Fix a precision problem in the microseconds range · 177ef2a6
      xiaofeng.yan authored
      An overrun could happen in function start_hrtick_dl()
      when a task with SCHED_DEADLINE runs in the microseconds
      range.
      
      For example, if a task with SCHED_DEADLINE has the following parameters:
      
        Task  runtime  deadline  period
         P1   200us     500us    500us
      
      The deadline and period from task P1 are less than 1ms.
      
      In order to achieve microsecond precision, we need to enable the HRTICK
      feature with the following commands:
      
        PC#echo "HRTICK" > /sys/kernel/debug/sched_features
        PC#trace-cmd record -e sched_switch &
        PC#./schedtool -E -t 200000:500000:500000 -e ./test
      
      The test binary here runs an endless while(1) loop.
      Some pieces of trace.dat are as follows:
      
        <idle>-0   157.603157: sched_switch: :R ==> 2481:4294967295: test
        test-2481  157.603203: sched_switch:  2481:R ==> 0:120: swapper/2
        <idle>-0   157.605657: sched_switch:  :R ==> 2481:4294967295: test
        test-2481  157.608183: sched_switch:  2481:R ==> 2483:120: trace-cmd
        trace-cmd-2483 157.609656: sched_switch:2483:R==>2481:4294967295: test
      
      We can get the runtime of P1 from the information above:
      
        runtime = 157.608183 - 157.605657
        runtime = 0.002526(2.526ms)
      
      The correct runtime should be less than or equal to 200us at some point.
      
      The problem is caused by the conditional check "delta > 10000" in
      start_hrtick_dl(): no hrtimer is started to bound the remaining
      runtime when that remaining runtime is less than 10us, so the process
      continues to run until the next tick period arrives.
      
      Move the code enforcing the minimum time slice from hrtick_start_fair()
      to hrtick_start(), because the EDF scheduling class also needs this
      function in start_hrtick_dl().
      
      To fix this problem, we call hrtimer_start() unconditionally in
      start_hrtick_dl(), and make sure the scheduling slice won't be smaller
      than 10us in hrtick_start().
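
      A sketch of the clamp after the move (UP-style variant, illustrative):

      	void hrtick_start(struct rq *rq, u64 delay)
      	{
      		struct hrtimer *timer = &rq->hrtick_timer;
      		/*
      		 * Never arm the timer for less than 10us: such slices
      		 * make no sense, and previously the hrtimer was simply
      		 * not started at all for them.
      		 */
      		s64 delta = max_t(s64, delay, 10000LL);

      		hrtimer_start(timer, ns_to_ktime(delta),
      			      HRTIMER_MODE_REL_PINNED);
      	}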
      Signed-off-by: Xiaofeng Yan <xiaofeng.yan@huawei.com>
      Reviewed-by: Li Zefan <lizefan@huawei.com>
      Acked-by: Juri Lelli <juri.lelli@arm.com>
      Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/1409022941-5880-1-git-send-email-xiaofeng.yan@huawei.com
      [ Massaged the changelog and the code. ]
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      177ef2a6
  11. 25 Aug, 2014 (1 commit)
  12. 20 Aug, 2014 (4 commits)
    • sched: Remove double_rq_lock() from __migrate_task() · a1e01829
      Kirill Tkhai authored
      Avoid double_rq_lock() and use TASK_ON_RQ_MIGRATING for
      __migrate_task(). The advantage is (obviously) not holding two
      rq->lock's at the same time and thereby increasing parallelism.
      
      The important point to note is that because we acquire dst->lock
      immediately after releasing src->lock, the potential wait time of
      task_rq_lock() callers on TASK_ON_RQ_MIGRATING is no longer than
      it would have been in the double rq lock scenario.
      Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Paul Turner <pjt@google.com>
      Cc: Oleg Nesterov <oleg@redhat.com>
      Cc: Steven Rostedt <rostedt@goodmis.org>
      Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
      Cc: Kirill Tkhai <tkhai@yandex.ru>
      Cc: Tim Chen <tim.c.chen@linux.intel.com>
      Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/1408528070.23412.89.camel@tkhai
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      a1e01829
    • sched: Teach scheduler to understand TASK_ON_RQ_MIGRATING state · cca26e80
      Kirill Tkhai authored
      This is a new p->on_rq state which will be used to indicate that a task
      is in the process of migrating between two RQs. It allows us to get rid
      of double_rq_lock(), which we previously used to change the rq of a
      queued task.
      
      Let's consider an example. To move a task between src_rq and
      dst_rq we will do the following:
      
      	raw_spin_lock(&src_rq->lock);
      	/* p is a task which is queued on src_rq */
      	p = ...;
      
      	dequeue_task(src_rq, p, 0);
      	p->on_rq = TASK_ON_RQ_MIGRATING;
      	set_task_cpu(p, dst_cpu);
      	raw_spin_unlock(&src_rq->lock);
      
      	/*
      	 * Both RQs are unlocked here.
      	 * Task p is dequeued from src_rq
      	 * but its on_rq value is not zero.
      	 */
      
      	raw_spin_lock(&dst_rq->lock);
      	p->on_rq = TASK_ON_RQ_QUEUED;
      	enqueue_task(dst_rq, p, 0);
      	raw_spin_unlock(&dst_rq->lock);
      
      While p->on_rq is TASK_ON_RQ_MIGRATING, the task is considered to be
      "migrating", and other parallel scheduler actions on it are not
      available to parallel callers. A parallel caller spins until the
      migration is completed.
      
      The unavailable actions are changing of cpu affinity, changing of
      priority, etc.; in other words, all the task-related functionality
      which used to require task_rq(p)->lock.
      
      To implement TASK_ON_RQ_MIGRATING support, we primarily rely on the
      following fact: most scheduler users (from which we are protecting a
      migrating task) use task_rq_lock() and __task_rq_lock() to take the
      lock of task_rq(p). These primitives know that a task's cpu may change,
      and they spin while they do not hold the lock of the right RQ. We add
      one more condition to them, so that they also spin until the migration
      is finished.
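
      A sketch of that extra condition in __task_rq_lock() (illustrative):

      	static inline bool task_on_rq_migrating(struct task_struct *p)
      	{
      		return p->on_rq == TASK_ON_RQ_MIGRATING;
      	}

      	static struct rq *__task_rq_lock(struct task_struct *p)
      	{
      		struct rq *rq;

      		for (;;) {
      			rq = task_rq(p);
      			raw_spin_lock(&rq->lock);
      			/* New condition: also wait out an in-flight migration. */
      			if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
      				return rq;
      			raw_spin_unlock(&rq->lock);

      			while (unlikely(task_on_rq_migrating(p)))
      				cpu_relax();
      		}
      	}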
      Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Paul Turner <pjt@google.com>
      Cc: Oleg Nesterov <oleg@redhat.com>
      Cc: Steven Rostedt <rostedt@goodmis.org>
      Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
      Cc: Kirill Tkhai <tkhai@yandex.ru>
      Cc: Tim Chen <tim.c.chen@linux.intel.com>
      Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/1408528062.23412.88.camel@tkhai
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      cca26e80
    • sched: Add wrapper for checking task_struct::on_rq · da0c1e65
      Kirill Tkhai authored
      Implement task_on_rq_queued() and use it everywhere instead of the raw
      on_rq check. No functional changes.

      The only exception is that we do not use the wrapper in
      check_for_tasks(), because it would require exporting
      task_on_rq_queued() in global header files. The next patch in the
      series will bring it back, so we do not shuffle it from here to there.
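
      The wrapper itself is a one-liner along these lines:

      	static inline int task_on_rq_queued(struct task_struct *p)
      	{
      		return p->on_rq == TASK_ON_RQ_QUEUED;
      	}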
      Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Paul Turner <pjt@google.com>
      Cc: Oleg Nesterov <oleg@redhat.com>
      Cc: Steven Rostedt <rostedt@goodmis.org>
      Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
      Cc: Kirill Tkhai <tkhai@yandex.ru>
      Cc: Tim Chen <tim.c.chen@linux.intel.com>
      Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      da0c1e65
    • sched: s/do_each_thread/for_each_process_thread/ in core.c · 5d07f420
      Oleg Nesterov authored
      Change kernel/sched/core.c to use for_each_process_thread().
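
      The conversion pattern looks like this (g iterates over processes, t
      over their threads; the loop body is illustrative):

      	/* Before: */
      	do_each_thread(g, t) {
      		set_tsk_need_resched(t);
      	} while_each_thread(g, t);

      	/* After: */
      	for_each_process_thread(g, t) {
      		set_tsk_need_resched(t);
      	}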
      Signed-off-by: Oleg Nesterov <oleg@redhat.com>
      Signed-off-by: Peter Zijlstra <peterz@infradead.org>
      Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
      Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
      Cc: Frank Mayhar <fmayhar@google.com>
      Cc: Frederic Weisbecker <fweisbec@redhat.com>
      Cc: Andrew Morton <akpm@linux-foundation.org>
      Cc: Sanjay Rao <srao@redhat.com>
      Cc: Larry Woodman <lwoodman@redhat.com>
      Cc: Rik van Riel <riel@redhat.com>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Link: http://lkml.kernel.org/r/20140813191953.GA19315@redhat.com
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      5d07f420
  13. 13 Aug, 2014 (1 commit)
    • locking: Remove deprecated smp_mb__() barriers · 2e39465a
      Peter Zijlstra authored
      It's been a while and there are no in-tree users left, so remove the
      deprecated barriers.
      Signed-off-by: Peter Zijlstra <peterz@infradead.org>
      Cc: Chen, Gong <gong.chen@linux.intel.com>
      Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
      Cc: Joe Perches <joe@perches.com>
      Cc: John Sullivan <jsrhbz@kanargh.force9.co.uk>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
      Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
      Cc: Theodore Ts'o <tytso@mit.edu>
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
      2e39465a
  14. 12 Aug, 2014 (1 commit)
  15. 07 Aug, 2014 (1 commit)
  16. 28 Jul, 2014 (3 commits)
  17. 16 Jul, 2014 (3 commits)
  18. 15 Jul, 2014 (1 commit)
    • cgroup: rename cgroup_subsys->base_cftypes to ->legacy_cftypes · 5577964e
      Tejun Heo authored
      Currently, cgroup_subsys->base_cftypes is used for both the unified
      default hierarchy and legacy ones and subsystems can mark each file
      with either CFTYPE_ONLY_ON_DFL or CFTYPE_INSANE if it has to appear
      only on one of them.  This is quite hairy and error-prone.  Also, we
      may end up exposing interface files to the default hierarchy without
      thinking it through.
      
      cgroup_subsys will grow two separate cftype arrays and apply each only
      on the hierarchies of the matching type.  This will allow organizing
      cftypes in a much clearer way and encourage subsystems to scrutinize
      the interface which is being exposed in the new default hierarchy.
      
      In preparation, this patch renames cgroup_subsys->base_cftypes to
      cgroup_subsys->legacy_cftypes.  This patch is pure rename.
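
      For the cpu controller, for example, the rename amounts to no more
      than a field-name change in the subsystem declaration (illustrative):

      	/* Before: */
      	.base_cftypes	= cpu_files,

      	/* After: */
      	.legacy_cftypes	= cpu_files,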
      Signed-off-by: Tejun Heo <tj@kernel.org>
      Acked-by: Neil Horman <nhorman@tuxdriver.com>
      Acked-by: Li Zefan <lizefan@huawei.com>
      Cc: Johannes Weiner <hannes@cmpxchg.org>
      Cc: Michal Hocko <mhocko@suse.cz>
      Cc: Vivek Goyal <vgoyal@redhat.com>
      Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
      Cc: Paul Mackerras <paulus@samba.org>
      Cc: Ingo Molnar <mingo@redhat.com>
      Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
      Cc: Aristeu Rozanski <aris@redhat.com>
      Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
      5577964e
  19. 05 Jul, 2014 (3 commits)
  20. 24 Jun, 2014 (1 commit)
    • rcu: Reduce overhead of cond_resched() checks for RCU · 4a81e832
      Paul E. McKenney authored
      Commit ac1bea85 (Make cond_resched() report RCU quiescent states)
      fixed a problem where a CPU looping in the kernel with but one runnable
      task would give RCU CPU stall warnings, even if the in-kernel loop
      contained cond_resched() calls.  Unfortunately, in so doing, it introduced
      performance regressions in Anton Blanchard's will-it-scale "open1" test.
      The problem appears to be not so much the increased cond_resched() path
      length as an increase in the rate at which grace periods complete, which
      increased per-update grace-period overhead.
      
      This commit takes a different approach to fixing this bug, mainly by
      moving the RCU-visible quiescent state from cond_resched() to
      rcu_note_context_switch(), and by further reducing the check to a
      simple non-zero test of a single per-CPU variable.  However, this
      approach requires that the force-quiescent-state processing send
      resched IPIs to the offending CPUs.  These will be sent only once
      the grace period has reached an age specified by the boot/sysfs
      parameter rcutree.jiffies_till_sched_qs, or once the grace period
      reaches an age halfway to the point at which RCU CPU stall warnings
      will be emitted, whichever comes first.
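
      A sketch of the reduced fast-path check described above, assuming a
      per-CPU mask named rcu_sched_qs_mask that the force-quiescent-state
      code sets for holdout CPUs (names illustrative, apart from
      rcu_momentary_dyntick_idle(), which is mentioned in the note below):

      	void rcu_note_context_switch(int cpu)
      	{
      		rcu_sched_qs(cpu);
      		rcu_preempt_note_context_switch(cpu);
      		/*
      		 * The common case is a single non-zero test of a
      		 * per-CPU variable; only holdout CPUs take the slow
      		 * path and report a momentary quiescent state.
      		 */
      		if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
      			rcu_momentary_dyntick_idle();
      	}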
      Reported-by: Dave Hansen <dave.hansen@intel.com>
      Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
      Cc: Andi Kleen <ak@linux.intel.com>
      Cc: Christoph Lameter <cl@gentwo.org>
      Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
      Cc: Eric Dumazet <eric.dumazet@gmail.com>
      Reviewed-by: Josh Triplett <josh@joshtriplett.org>
      [ paulmck: Made rcu_momentary_dyntick_idle() as suggested by the
        ktest build robot.  Also fixed smp_mb() comment as noted by
        Oleg Nesterov. ]
      
      Merge with e552592e (Reduce overhead of cond_resched() checks for RCU)
      Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
      4a81e832