/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
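
/*
 * Illustrative example (assuming 4 KiB pages, so BITS_PER_PAGE == 32768):
 * bit 70 of the second bitmap page, i.e. map == &pid_ns->pidmap[1] and
 * off == 70, maps to pid 1 * 32768 + 70 == 32838.  find_next_offset()
 * below performs the reverse step of locating a free bit within one page.
 */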

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
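/*
 * For scale (illustrative, assuming a 64-bit build with 4 KiB pages):
 * PID_MAX_LIMIT is 4 * 1024 * 1024, so the map tops out at
 * 4194304 / 32768 == 128 bitmap pages, and only the pages actually
 * reached by allocation are ever backed by memory.
 */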
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
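
/*
 * Illustrative values: with base == 30000, pid_before(30000, 31000, 400)
 * is true because (unsigned)(31000 - 30000) == 1000, while
 * (unsigned)(400 - 30000) wraps around to a very large value; a pid that
 * wrapped back to 400 is therefore ordered after 31000 when walking
 * upward from 30000.
 */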

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at the pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
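
/*
 * Illustrative race: two tasks both observe last_pid == 100 (base) and
 * allocate pids 101 and 102.  If the task holding 101 loses the cmpxchg,
 * pid_before(100, 102, 101) is false and it simply gives up; if the task
 * holding 102 loses, it retries and overwrites 101.  Either way last_pid
 * settles on 102, the value furthest along from base.
 */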

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				return -ENOMEM;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -EAGAIN;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch(--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (IS_ERR_VALUE(nr)) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
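
/*
 * Layout sketch of the result: for a task created two namespace levels
 * below the initial one (pid->level == 2), numbers[2] holds the nr seen
 * inside the innermost namespace, numbers[1] the nr in its parent, and
 * numbers[0] the nr in init_pid_ns.  pid_nr_ns() further down simply
 * indexes this array by ns->level after checking the namespace matches.
 */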

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
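
/*
 * Minimal usage sketch (hypothetical caller, must hold rcu_read_lock()):
 *
 *	for (nr = 1; (pid = find_ge_pid(nr, ns)) != NULL;
 *	     nr = pid_nr_ns(pid, ns) + 1)
 *		use(pid);
 *
 * visits every hashed pid in 'ns' in increasing numeric order.
 */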

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}