/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the pidmap_lock held, and it is only
 * walked under rcu_read_lock() or with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst case,
 * when all but one of a possible 1 million PIDs are already allocated,
 * an allocation scans 32 list entries and at most PAGE_SIZE bytes. The
 * typical fastpath is a single successful set-bit. Freeing is O(1).
 */
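
/*
 * Worked numbers for the worst case above, assuming 4 KB pages:
 * BITS_PER_PAGE = 4096 * 8 = 32768, so 2^20 (~1 million) PIDs need
 * 2^20 / 32768 = 32 bitmap pages - the "32 list entries" - and a full
 * scan of any one bitmap touches at most PAGE_SIZE bytes.
 */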

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pspace.h>

#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static kmem_cache_t *pid_cachep;

int pid_max = PID_MAX_DEFAULT;
int last_pid;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_pid(map, off)	(((map) - pidmap_array)*BITS_PER_PAGE + (off))
#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
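
/*
 * Example of the mapping, assuming 4 KB pages (BITS_PER_PAGE == 32768):
 * pid 40000 sits in pidmap_array[40000 / BITS_PER_PAGE], i.e. entry 1,
 * at bit offset 40000 & BITS_PER_PAGE_MASK == 7232, and
 * mk_pid(&pidmap_array[1], 7232) == 1 * 32768 + 7232 == 40000 inverts
 * the mapping.
 */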

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
static struct pidmap pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid(), which runs with tasklist_lock write-held,
 * and another CPU that takes spin_lock(&pidmap_lock) and is then hit
 * by an interrupt routine that does read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
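
/*
 * The deadlock spelled out; keeping interrupts disabled while
 * pidmap_lock is held makes the <interrupt> step on CPU 1 impossible:
 *
 *	CPU 0 (exit path)               CPU 1
 *	write_lock_irq(&tasklist_lock);
 *	                                spin_lock(&pidmap_lock);
 *	free_pid():
 *	  spin_lock(&pidmap_lock);      <- spins, CPU 1 holds it
 *	                                <interrupt>
 *	                                read_lock(&tasklist_lock);
 *	                                <- spins, CPU 0 holds the write lock
 */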
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(int pid)
{
	struct pidmap *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

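/*
 * Allocate the next free pid. The search starts just above last_pid,
 * wraps around at pid_max, and on wrap resumes at RESERVED_PIDS so the
 * low pids stay reserved. Returns -1 when the pid space is exhausted.
 */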
static int alloc_pidmap(void)
{
	int i, offset, max_scan, pid, last = last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pidmap_array[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(map, offset);
			/*
			 * find_next_offset() found a bit, the pid
			 * computed from it is in bounds, and if the
			 * scan has wrapped back into the block it
			 * started in, pid is still below the starting
			 * point last (unless the scan began at offset
			 * 0, in which case the final block gets a
			 * full pass).
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pidmap_array[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(map, offset);
	}
	return -1;
}

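/*
 * Find the first allocated pid greater than last, or -1 if there is
 * none. Bitmap pages that were never allocated are skipped entirely.
 */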
static int next_pidmap(int last)
{
	int offset;
	struct pidmap *map;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pidmap_array[(last + 1)/BITS_PER_PAGE];
	for (; map < &pidmap_array[PIDMAP_ENTRIES]; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(map, offset);
	}
	return -1;
}

fastcall void put_pid(struct pid *pid)
{
	if (!pid)
		return;
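	/*
	 * Fast path: if we hold the only reference there is nobody to
	 * race with, so free directly and skip the atomic decrement.
	 */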
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count))
		kmem_cache_free(pid_cachep, pid);
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	hlist_del_rcu(&pid->pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	free_pidmap(pid->nr);
	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(void)
{
	struct pid *pid;
	enum pid_type type;
	int nr = -1;

	pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	nr = alloc_pidmap();
	if (nr < 0)
		goto out_free;

	atomic_set(&pid->count, 1);
	pid->nr = nr;
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	kmem_cache_free(pid_cachep, pid);
	pid = NULL;
	goto out;
}

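/*
 * Look up the struct pid for the numeric pid nr, or NULL if none is
 * hashed. Must be called under rcu_read_lock() or with tasklist_lock
 * at least read-acquired: the hash chain is RCU-protected.
 */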
struct pid * fastcall find_pid(int nr)
{
	struct hlist_node *elem;
	struct pid *pid;

	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid);

int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
	struct pid_link *link;
	struct pid *pid;

	link = &task->pids[type];
	link->pid = pid = find_pid(nr);
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
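
/*
 * Lifecycle sketch. The real callers live in the fork and exit paths
 * outside this file; the locking notes follow the comments above:
 *
 *	struct pid *pid = alloc_pid();             reserves nr, hashes pid
 *	attach_pid(task, PIDTYPE_PID, pid->nr);    tasklist_lock write-held
 *	...
 *	detach_pid(task, PIDTYPE_PID);             tasklist_lock write-held;
 *	                                           calls free_pid() once no
 *	                                           task of any type uses it
 */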

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));
	rcu_read_unlock();

	return pid;
}

EXPORT_SYMBOL_GPL(find_get_pid);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
	struct pid *pid;

	do {
		pid = find_pid(nr);
		if (pid)
			break;
		nr = next_pidmap(nr);
	} while (nr > 0);

	return pid;
}
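
/*
 * Iteration sketch in the style of the /proc readdir loop
 * (do_something() is a stand-in): this visits every allocated pid in
 * ascending order, and must run under rcu_read_lock() because
 * find_pid() walks an RCU-protected hash chain:
 *
 *	for (nr = 1; (pid = find_ge_pid(nr)) != NULL; nr = pid->nr + 1)
 *		do_something(pid);
 */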

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
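
/*
 * Worked sizing, ignoring that nr_kernel_pages is somewhat less than
 * total RAM: on a 128 MB machine, megabytes = 128 and fls(512) = 10,
 * so pidhash_shift = 10 and the table gets 1024 slots; at 1 GB and
 * above, fls(4096) = 13 is clamped to 12, the 4096-slot maximum.
 */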
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, pidmap_array->page);
	atomic_dec(&pidmap_array->nr_free);

	pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
					__alignof__(struct pid),
					SLAB_PANIC, NULL, NULL);
}