Commit 32bd7eb5 authored by Li Zefan, committed by Ingo Molnar

sched: Remove remaining USER_SCHED code

This is left over from commit 7c941438 ("sched: Remove USER_SCHED").
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Dhaval Giani <dhaval.giani@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Howells <dhowells@redhat.com>
LKML-Reference: <4BA9A05F.7010407@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent c9494727
init/Kconfig
@@ -604,8 +604,7 @@ config RT_GROUP_SCHED
 	default n
 	help
 	  This feature lets you explicitly allocate real CPU bandwidth
-	  to users or control groups (depending on the "Basis for grouping tasks"
-	  setting below. If enabled, it will also make it impossible to
+	  to task groups. If enabled, it will also make it impossible to
 	  schedule realtime tasks for non-root users until you allocate
 	  realtime bandwidth for them.
 	  See Documentation/scheduler/sched-rt-group.txt for more information.
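
With USER_SCHED gone, the group in question is always a control group: realtime bandwidth is granted by writing to the cpu controller's cpu.rt_runtime_us file, as described in Documentation/scheduler/sched-rt-group.txt. A minimal C sketch of that step; the cgroup-v1 mount point /sys/fs/cgroup/cpu and the group name "mygroup" are assumptions for illustration, not something this commit establishes:

/* Sketch: grant a cgroup 0.5s of RT runtime per (default) 1s period.
 * Assumes a v1 cpu controller mounted at /sys/fs/cgroup/cpu and an
 * existing group "mygroup"; path and value are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* 500000us of RT runtime per cpu.rt_period_us (default 1000000us) */
	if (fprintf(f, "%d\n", 500000) < 0)
		perror("write");
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}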
kernel/capability.c
@@ -15,7 +15,6 @@
 #include <linux/syscalls.h>
 #include <linux/pid_namespace.h>
 #include <asm/uaccess.h>
-#include "cred-internals.h"
 
 /*
  * Leveraged for setting/resetting capabilities
kernel/cred-internals.h (deleted file)
-/* Internal credentials stuff
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-/*
- * user.c
- */
-static inline void sched_switch_user(struct task_struct *p)
-{
-#ifdef CONFIG_USER_SCHED
-	sched_move_task(p);
-#endif /* CONFIG_USER_SCHED */
-}
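
The header's only live content was this stub: once CONFIG_USER_SCHED can no longer be set, sched_switch_user() always compiles to an empty inline, so the hunks below drop the include sites and the one remaining caller in commit_creds(). As a standalone, hedged illustration of the config-gated-stub idiom (all names hypothetical, not kernel API):

/* Sketch of the idiom: when CONFIG_FOO is unset, foo_notify() is an
 * empty static inline and calls to it compile away entirely. */
#include <stdio.h>

/* #define CONFIG_FOO */		/* normally selected via Kconfig */

static inline void foo_notify(int task_id)
{
#ifdef CONFIG_FOO
	printf("telling subsystem foo about task %d\n", task_id);
#else
	(void)task_id;			/* silence unused-parameter warnings */
#endif
}

int main(void)
{
	foo_notify(42);			/* a no-op unless CONFIG_FOO is defined */
	return 0;
}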
kernel/cred.c
@@ -16,7 +16,6 @@
 #include <linux/init_task.h>
 #include <linux/security.h>
 #include <linux/cn_proc.h>
-#include "cred-internals.h"
 
 #if 0
 #define kdebug(FMT, ...) \
@@ -557,8 +556,6 @@ int commit_creds(struct cred *new)
 		atomic_dec(&old->user->processes);
 	alter_cred_subscribers(old, -2);
 
-	sched_switch_user(task);
-
 	/* send notifications */
 	if (new->uid != old->uid ||
 	    new->euid != old->euid ||
kernel/exit.c
@@ -55,7 +55,6 @@
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include "cred-internals.h"
 
 static void exit_mm(struct task_struct * tsk);
 
kernel/sched_debug.c
@@ -173,11 +173,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
-#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	{
-		uid_t uid = cfs_rq->tg->uid;
-		SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
-	}
 #else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
kernel/user.c
@@ -16,7 +16,6 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/user_namespace.h>
-#include "cred-internals.h"
 
 struct user_namespace init_user_ns = {
 	.kref = {
@@ -137,9 +136,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up, *new;
 
-	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
-	 * atomic.
-	 */
+	/* Make uid_hash_find() + uid_hash_insert() atomic. */
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	spin_unlock_irq(&uidhash_lock);
@@ -161,11 +158,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
-		/* This case is not possible when CONFIG_USER_SCHED
-		 * is defined, since we serialize alloc_uid() using
-		 * uids_mutex. Hence no need to call
-		 * sched_destroy_user() or remove_user_sysfs_dir().
-		 */
 		key_put(new->uid_keyring);
 		key_put(new->session_keyring);
 		kmem_cache_free(uid_cachep, new);
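
The two alloc_uid() hunks trim comments around the find-or-insert pattern the function keeps: look up the uid under uidhash_lock, allocate a new user_struct with the lock dropped, then re-check under the lock and discard the fresh allocation if another task raced in first. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the spinlock and a plain linked list for the uid hash (all names hypothetical, not kernel API):

/* Sketch of alloc_uid()'s optimistic find-or-insert: lock, look up,
 * allocate outside the lock, then re-check under the lock and free
 * our copy if another thread won the race. */
#include <pthread.h>
#include <stdlib.h>

struct user {
	unsigned int uid;
	int refcount;
	struct user *next;
};

static struct user *users;		/* stand-in for the uid hash chain */
static pthread_mutex_t users_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold users_lock */
static struct user *user_find(unsigned int uid)
{
	struct user *u;

	for (u = users; u; u = u->next)
		if (u->uid == uid)
			return u;
	return NULL;
}

struct user *user_get(unsigned int uid)
{
	struct user *u, *new;

	/* fast path: uid already present */
	pthread_mutex_lock(&users_lock);
	u = user_find(uid);
	if (u)
		u->refcount++;
	pthread_mutex_unlock(&users_lock);
	if (u)
		return u;

	/* allocate outside the lock, as alloc_uid() does */
	new = calloc(1, sizeof(*new));
	if (!new)
		return NULL;
	new->uid = uid;
	new->refcount = 1;

	/* make find + insert atomic: another thread may have won the race */
	pthread_mutex_lock(&users_lock);
	u = user_find(uid);
	if (u) {
		u->refcount++;		/* lost the race: reuse theirs... */
		free(new);		/* ...and discard our copy */
	} else {
		new->next = users;	/* won the race: publish our entry */
		users = new;
		u = new;
	}
	pthread_mutex_unlock(&users_lock);
	return u;
}

int main(void)
{
	struct user *u = user_get(1000);
	return u ? 0 : 1;
}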