// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/dir.c - kernfs directory implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>

#include "kernfs-internal.h"

DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
/*
 * Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
 * call pr_cont() while holding rename_lock. Because sometimes pr_cont()
 * will perform wakeups when releasing console_sem. Holding rename_lock
 * will introduce deadlock if the scheduler reads the kernfs_name in the
 * wakeup path.
 */
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by pr_cont_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* root->ino_idr */

#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)

/* Is @kn active, i.e. not deactivated for removal? */
static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_mutex);
	return atomic_read(&kn->active) >= 0;
}

/* Does @kn participate in lockdep annotation of its active references? */
static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}

/* Copy @kn's name into @buf; caller must hold kernfs_rename_lock. */
static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	const char *name;

	if (!kn)
		return strlcpy(buf, "(null)", buflen);

	/* the root node has no parent and is reported as "/" */
	name = kn->parent ? kn->name : "/";
	return strlcpy(buf, name, buflen);
}

/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
60
{
61
	size_t depth = 0;
62

63 64 65 66 67 68
	while (to->parent && to != from) {
		depth++;
		to = to->parent;
	}
	return depth;
}
69

70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
/* Find the closest common ancestor of @a and @b, or NULL if they live on
 * different kernfs hierarchies. */
static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
						  struct kernfs_node *b)
{
	struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
	size_t da, db;

	if (ra != rb)
		return NULL;

	da = kernfs_depth(ra->kn, a);
	db = kernfs_depth(rb->kn, b);

	/* bring both nodes to the same depth ... */
	while (da > db) {
		a = a->parent;
		da--;
	}
	while (db > da) {
		b = b->parent;
		db--;
	}

	/* ... then walk up in lockstep; at worst they meet at the root */
	while (a != b) {
		a = a->parent;
		b = b->parent;
	}

	return a;
}

/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_from: kernfs node which should be treated as root for the path
 * @kn_to: kernfs node to which path is needed
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * We need to handle couple of scenarios here:
 * [1] when @kn_from is an ancestor of @kn_to at some level
 * kn_from: /n1/n2/n3
 * kn_to:   /n1/n2/n3/n4/n5
 * result:  /n4/n5
 *
 * [2] when @kn_from is on a different hierarchy and we need to find common
 * ancestor between @kn_from and @kn_to.
 * kn_from: /n1/n2/n3/n4
 * kn_to:   /n1/n2/n5
 * result:  /../../n5
 * OR
 * kn_from: /n1/n2/n3/n4/n5   [depth=5]
 * kn_to:   /n1/n2/n3         [depth=3]
 * result:  /../..
 *
 * [3] when @kn_to is NULL result will be "(null)"
 *
 * Returns the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	if (!buf)
		return -EINVAL;

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	/* one "/.." for every level between @kn_from and the common ancestor */
	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* then descend from the common ancestor down to @kn_to */
	for (i = depth_to - 1; i >= 0; i--) {
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}

/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes.  The behavior is
 * similar to strlcpy().  It returns the length of @kn's name and if @buf
 * isn't long enough, it's filled upto @buflen-1 and nul terminated.
 *
 * Fills buffer with "(null)" if @kn is NULL.
 *
 * This function can be called from any context.
 */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}

/**
 * kernfs_path_from_node - build path of node @to relative to @from.
 * @from: parent kernfs_node relative to which we need to build the path
 * @to: kernfs_node of interest
 * @buf: buffer to copy @to's path into
 * @buflen: size of @buf
 *
 * Builds @to's path relative to @from in @buf. @from and @to must
 * be on the same kernfs-root. If @from is not parent of @to, then a relative
 * path (which includes '..'s) as needed to reach from @from to @to is
 * returned.
 *
 * Returns the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 */
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
			  char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);

231 232 233 234 235 236 237 238 239 240
/**
 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

241
	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
242

243
	kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
244 245
	pr_cont("%s", kernfs_pr_cont_buf);

246
	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
247 248 249 250 251 252 253 254 255 256 257
}

/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
258
	int sz;
259

260
	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);
261

262 263
	sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
				   sizeof(kernfs_pr_cont_buf));
264 265 266 267 268 269 270 271 272 273 274
	if (sz < 0) {
		pr_cont("(error)");
		goto out;
	}

	if (sz >= sizeof(kernfs_pr_cont_buf)) {
		pr_cont("(name too long)");
		goto out;
	}

	pr_cont("%s", kernfs_pr_cont_buf);
275

276
out:
277
	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
}

/**
 * kernfs_get_parent - determine the parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins and returns it.  This function can be
 * called from any context.
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	/* rename_lock stabilizes kn->parent while we grab a reference */
	spin_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}

/**
301
 *	kernfs_name_hash
302 303 304 305 306
 *	@name: Null terminated string to hash
 *	@ns:   Namespace tag to hash
 *
 *	Returns 31 bit hash of ns + name (so it fits in an off_t )
 */
307
static unsigned int kernfs_name_hash(const char *name, const void *ns)
308
{
309
	unsigned long hash = init_name_hash(ns);
310 311 312
	unsigned int len = strlen(name);
	while (len--)
		hash = partial_name_hash(*name++, hash);
313
	hash = end_name_hash(hash);
314 315
	hash &= 0x7fffffffU;
	/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
R
Richard Cochran 已提交
316
	if (hash < 2)
317 318 319 320 321 322
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

323 324
static int kernfs_name_compare(unsigned int hash, const char *name,
			       const void *ns, const struct kernfs_node *kn)
325
{
326 327 328 329 330 331 332 333
	if (hash < kn->hash)
		return -1;
	if (hash > kn->hash)
		return 1;
	if (ns < kn->ns)
		return -1;
	if (ns > kn->ns)
		return 1;
334
	return strcmp(name, kn->name);
335 336
}

337 338
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
339
{
340
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
341 342 343
}

/**
344
 *	kernfs_link_sibling - link kernfs_node into sibling rbtree
345
 *	@kn: kernfs_node of interest
346
 *
347
 *	Link @kn into its sibling rbtree which starts from
348
 *	@kn->parent->dir.children.
349 350
 *
 *	Locking:
351
 *	mutex_lock(kernfs_mutex)
352 353 354 355
 *
 *	RETURNS:
 *	0 on susccess -EEXIST on failure.
 */
356
static int kernfs_link_sibling(struct kernfs_node *kn)
357
{
358
	struct rb_node **node = &kn->parent->dir.children.rb_node;
359 360 361
	struct rb_node *parent = NULL;

	while (*node) {
362
		struct kernfs_node *pos;
363 364
		int result;

365
		pos = rb_to_kn(*node);
366
		parent = *node;
367
		result = kernfs_sd_compare(kn, pos);
368
		if (result < 0)
369
			node = &pos->rb.rb_left;
370
		else if (result > 0)
371
			node = &pos->rb.rb_right;
372 373 374
		else
			return -EEXIST;
	}
J
Jianyu Zhan 已提交
375

376
	/* add new node and rebalance the tree */
377 378
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);
J
Jianyu Zhan 已提交
379 380 381 382 383

	/* successfully added, account subdir number */
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;

384 385 386 387
	return 0;
}

/**
388
 *	kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
389
 *	@kn: kernfs_node of interest
390
 *
391 392 393
 *	Try to unlink @kn from its sibling rbtree which starts from
 *	kn->parent->dir.children.  Returns %true if @kn was actually
 *	removed, %false if @kn wasn't on the rbtree.
394 395
 *
 *	Locking:
396
 *	mutex_lock(kernfs_mutex)
397
 */
398
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
399
{
400 401 402
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

T
Tejun Heo 已提交
403
	if (kernfs_type(kn) == KERNFS_DIR)
404
		kn->parent->dir.subdirs--;
405

406
	rb_erase(&kn->rb, &kn->parent->dir.children);
407 408
	RB_CLEAR_NODE(&kn->rb);
	return true;
409 410 411
}

/**
412
 *	kernfs_get_active - get an active reference to kernfs_node
413
 *	@kn: kernfs_node to get an active reference to
414
 *
415
 *	Get an active reference of @kn.  This function is noop if @kn
416 417 418
 *	is NULL.
 *
 *	RETURNS:
419
 *	Pointer to @kn on success, NULL on failure.
420
 */
421
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
422
{
423
	if (unlikely(!kn))
424 425
		return NULL;

426 427
	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;
428

429
	if (kernfs_lockdep(kn))
430 431
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
432 433 434
}

/**
435
 *	kernfs_put_active - put an active reference to kernfs_node
436
 *	@kn: kernfs_node to put an active reference to
437
 *
438
 *	Put an active reference to @kn.  This function is noop if @kn
439 440
 *	is NULL.
 */
441
void kernfs_put_active(struct kernfs_node *kn)
442 443 444
{
	int v;

445
	if (unlikely(!kn))
446 447
		return;

448
	if (kernfs_lockdep(kn))
449
		rwsem_release(&kn->dep_map, _RET_IP_);
450
	v = atomic_dec_return(&kn->active);
T
Tejun Heo 已提交
451
	if (likely(v != KN_DEACTIVATED_BIAS))
452 453
		return;

454
	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
455 456 457
}

/**
T
Tejun Heo 已提交
458 459
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
460
 *
T
Tejun Heo 已提交
461 462 463
 * Drain existing usages and nuke all existing mmaps of @kn.  Mutiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
464
 */
T
Tejun Heo 已提交
465
static void kernfs_drain(struct kernfs_node *kn)
466
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
467
{
468
	struct kernfs_root *root = kernfs_root(kn);
469

470
	lockdep_assert_held(&kernfs_mutex);
T
Tejun Heo 已提交
471
	WARN_ON_ONCE(kernfs_active(kn));
472

473
	mutex_unlock(&kernfs_mutex);
474

475
	if (kernfs_lockdep(kn)) {
476 477 478 479
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}
480

481
	/* but everyone should wait for draining */
482 483
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
484

485
	if (kernfs_lockdep(kn)) {
486
		lock_acquired(&kn->dep_map, _RET_IP_);
487
		rwsem_release(&kn->dep_map, _RET_IP_);
488
	}
489

490
	kernfs_drain_open_files(kn);
491

492
	mutex_lock(&kernfs_mutex);
493 494 495
}

/**
496 497
 * kernfs_get - get a reference count on a kernfs_node
 * @kn: the target kernfs_node
498
 */
499
void kernfs_get(struct kernfs_node *kn)
500
{
501
	if (kn) {
502 503
		WARN_ON(!atomic_read(&kn->count));
		atomic_inc(&kn->count);
504 505 506 507 508
	}
}
EXPORT_SYMBOL_GPL(kernfs_get);

/**
509 510
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
511
 *
512
 * Put a reference count of @kn and destroy it if it reached zero.
513
 */
514
void kernfs_put(struct kernfs_node *kn)
515
{
516
	struct kernfs_node *parent;
517
	struct kernfs_root *root;
518

519
	if (!kn || !atomic_dec_and_test(&kn->count))
520
		return;
521
	root = kernfs_root(kn);
522
 repeat:
T
Tejun Heo 已提交
523 524
	/*
	 * Moving/renaming is always done while holding reference.
525
	 * kn->parent won't change beneath us.
526
	 */
527
	parent = kn->parent;
528

T
Tejun Heo 已提交
529 530 531
	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));
532

T
Tejun Heo 已提交
533
	if (kernfs_type(kn) == KERNFS_LINK)
534
		kernfs_put(kn->symlink.target_kn);
T
Tejun Heo 已提交
535 536 537

	kfree_const(kn->name);

538 539
	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs);
540
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
541
	}
542
	spin_lock(&kernfs_idr_lock);
543
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
544
	spin_unlock(&kernfs_idr_lock);
545
	kmem_cache_free(kernfs_node_cache, kn);
546

547 548
	kn = parent;
	if (kn) {
549
		if (atomic_dec_and_test(&kn->count))
550 551
			goto repeat;
	} else {
552
		/* just released the root kn, free @root too */
553
		idr_destroy(&root->ino_idr);
554 555
		kfree(root);
	}
556 557 558
}
EXPORT_SYMBOL_GPL(kernfs_put);

559
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
560
{
561
	struct kernfs_node *kn;
562 563 564 565

	if (flags & LOOKUP_RCU)
		return -ECHILD;

T
Tejun Heo 已提交
566
	/* Always perform fresh lookup for negatives */
567
	if (d_really_is_negative(dentry))
T
Tejun Heo 已提交
568 569
		goto out_bad_unlocked;

S
Shaohua Li 已提交
570
	kn = kernfs_dentry_node(dentry);
571
	mutex_lock(&kernfs_mutex);
572

T
Tejun Heo 已提交
573 574
	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
575 576
		goto out_bad;

577
	/* The kernfs node has been moved? */
S
Shaohua Li 已提交
578
	if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
579 580
		goto out_bad;

581
	/* The kernfs node has been renamed */
582
	if (strcmp(dentry->d_name.name, kn->name) != 0)
583 584
		goto out_bad;

585
	/* The kernfs node has been moved to a different namespace */
586
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
587
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
588 589
		goto out_bad;

590
	mutex_unlock(&kernfs_mutex);
591 592
	return 1;
out_bad:
593
	mutex_unlock(&kernfs_mutex);
T
Tejun Heo 已提交
594
out_bad_unlocked:
595 596 597
	return 0;
}

598
const struct dentry_operations kernfs_dops = {
599
	.d_revalidate	= kernfs_dop_revalidate,
600 601
};

602 603 604 605 606 607 608 609 610 611 612 613 614
/**
 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
 * @dentry: the dentry in question
 *
 * Return the kernfs_node associated with @dentry.  If @dentry is not a
 * kernfs one, %NULL is returned.
 *
 * While the returned kernfs_node will stay accessible as long as @dentry
 * is accessible, the returned node can be in any state and the caller is
 * fully responsible for determining what's accessible.
 */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
S
Shaohua Li 已提交
615 616 617
	if (dentry->d_sb->s_op == &kernfs_sops &&
	    !d_really_is_negative(dentry))
		return kernfs_dentry_node(dentry);
618 619 620
	return NULL;
}

621
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
622
					     struct kernfs_node *parent,
623
					     const char *name, umode_t mode,
624
					     kuid_t uid, kgid_t gid,
625
					     unsigned flags)
626
{
627
	struct kernfs_node *kn;
628
	u32 id_highbits;
629
	int ret;
630

T
Tejun Heo 已提交
631 632 633
	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;
634

635
	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
636
	if (!kn)
637 638
		goto err_out1;

639 640
	idr_preload(GFP_KERNEL);
	spin_lock(&kernfs_idr_lock);
S
Shaohua Li 已提交
641
	ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
642 643 644 645
	if (ret >= 0 && ret < root->last_id_lowbits)
		root->id_highbits++;
	id_highbits = root->id_highbits;
	root->last_id_lowbits = ret;
646 647
	spin_unlock(&kernfs_idr_lock);
	idr_preload_end();
648
	if (ret < 0)
649
		goto err_out2;
650

651
	kn->id = (u64)id_highbits << 32 | ret;
652

653
	atomic_set(&kn->count, 1);
T
Tejun Heo 已提交
654
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
655
	RB_CLEAR_NODE(&kn->rb);
656

657 658
	kn->name = name;
	kn->mode = mode;
T
Tejun Heo 已提交
659
	kn->flags = flags;
660

661 662 663 664 665 666 667 668 669 670 671 672
	if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
		struct iattr iattr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_uid = uid,
			.ia_gid = gid,
		};

		ret = __kernfs_setattr(kn, &iattr);
		if (ret < 0)
			goto err_out3;
	}

673 674 675 676 677 678
	if (parent) {
		ret = security_kernfs_init_security(parent, kn);
		if (ret)
			goto err_out3;
	}

679
	return kn;
680

681
 err_out3:
682
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
683
 err_out2:
684
	kmem_cache_free(kernfs_node_cache, kn);
685
 err_out1:
T
Tejun Heo 已提交
686
	kfree_const(name);
687 688 689
	return NULL;
}

690 691
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
				    const char *name, umode_t mode,
692
				    kuid_t uid, kgid_t gid,
693 694 695 696
				    unsigned flags)
{
	struct kernfs_node *kn;

697
	kn = __kernfs_new_node(kernfs_root(parent), parent,
698
			       name, mode, uid, gid, flags);
699 700 701 702 703 704 705
	if (kn) {
		kernfs_get(parent);
		kn->parent = parent;
	}
	return kn;
}

706
/*
707
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
708
 * @root: the kernfs root
709 710 711 712
 * @id: the target node id
 *
 * @id's lower 32bits encode ino and upper gen.  If the gen portion is
 * zero, all generations are matched.
713 714 715 716
 *
 * RETURNS:
 * NULL on failure. Return a kernfs node with reference counter incremented
 */
717 718
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
						   u64 id)
719 720
{
	struct kernfs_node *kn;
721 722
	ino_t ino = kernfs_id_ino(id);
	u32 gen = kernfs_id_gen(id);
723

724 725
	spin_lock(&kernfs_idr_lock);

726
	kn = idr_find(&root->ino_idr, (u32)ino);
727
	if (!kn)
728
		goto err_unlock;
729

730 731 732 733 734 735 736 737 738
	if (sizeof(ino_t) >= sizeof(u64)) {
		/* we looked up with the low 32bits, compare the whole */
		if (kernfs_ino(kn) != ino)
			goto err_unlock;
	} else {
		/* 0 matches all generations */
		if (unlikely(gen && kernfs_gen(kn) != gen))
			goto err_unlock;
	}
739

740 741 742 743 744 745 746
	/*
	 * ACTIVATED is protected with kernfs_mutex but it was clear when
	 * @kn was added to idr and we just wanna see it set.  No need to
	 * grab kernfs_mutex.
	 */
	if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
		     !atomic_inc_not_zero(&kn->count)))
747
		goto err_unlock;
748

749
	spin_unlock(&kernfs_idr_lock);
750
	return kn;
751 752
err_unlock:
	spin_unlock(&kernfs_idr_lock);
753 754 755
	return NULL;
}

756
/**
757
 *	kernfs_add_one - add kernfs_node to parent without warning
758
 *	@kn: kernfs_node to be added
759
 *
760 761 762
 *	The caller must already have initialized @kn->parent.  This
 *	function increments nlink of the parent's inode if @kn is a
 *	directory and link into the children list of the parent.
763 764 765 766 767
 *
 *	RETURNS:
 *	0 on success, -EEXIST if entry with the given name already
 *	exists.
 */
T
Tejun Heo 已提交
768
int kernfs_add_one(struct kernfs_node *kn)
769
{
770
	struct kernfs_node *parent = kn->parent;
771
	struct kernfs_iattrs *ps_iattr;
T
Tejun Heo 已提交
772
	bool has_ns;
773 774
	int ret;

T
Tejun Heo 已提交
775 776 777 778 779 780 781
	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;
782

T
Tejun Heo 已提交
783
	if (kernfs_type(parent) != KERNFS_DIR)
T
Tejun Heo 已提交
784
		goto out_unlock;
785

T
Tejun Heo 已提交
786
	ret = -ENOENT;
787 788 789
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

790
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
T
Tejun Heo 已提交
791
		goto out_unlock;
792

793
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
794

795
	ret = kernfs_link_sibling(kn);
796
	if (ret)
T
Tejun Heo 已提交
797
		goto out_unlock;
798 799

	/* Update timestamps on the parent */
800
	ps_iattr = parent->iattr;
801
	if (ps_iattr) {
802 803
		ktime_get_real_ts64(&ps_iattr->ia_ctime);
		ps_iattr->ia_mtime = ps_iattr->ia_ctime;
804 805
	}

806 807 808 809 810 811 812 813 814 815 816 817 818
	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate().  A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

T
Tejun Heo 已提交
819
out_unlock:
820
	mutex_unlock(&kernfs_mutex);
T
Tejun Heo 已提交
821
	return ret;
822 823 824
}

/**
825 826
 * kernfs_find_ns - find kernfs_node with the given name
 * @parent: kernfs_node to search under
827 828 829
 * @name: name to look for
 * @ns: the namespace tag to use
 *
830 831
 * Look for kernfs_node with name @name under @parent.  Returns pointer to
 * the found kernfs_node on success, %NULL on failure.
832
 */
833 834 835
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
					  const unsigned char *name,
					  const void *ns)
836
{
837
	struct rb_node *node = parent->dir.children.rb_node;
838
	bool has_ns = kernfs_ns_enabled(parent);
839 840
	unsigned int hash;

841
	lockdep_assert_held(&kernfs_mutex);
842 843

	if (has_ns != (bool)ns) {
844
		WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
845
		     has_ns ? "required" : "invalid", parent->name, name);
846 847 848
		return NULL;
	}

849
	hash = kernfs_name_hash(name, ns);
850
	while (node) {
851
		struct kernfs_node *kn;
852 853
		int result;

854
		kn = rb_to_kn(node);
855
		result = kernfs_name_compare(hash, name, ns, kn);
856 857 858 859 860
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
861
			return kn;
862 863 864 865
	}
	return NULL;
}

866 867 868 869
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
870 871
	size_t len;
	char *p, *name;
872 873 874

	lockdep_assert_held(&kernfs_mutex);

875
	spin_lock_irq(&kernfs_pr_cont_lock);
876 877 878 879

	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
880
		spin_unlock_irq(&kernfs_pr_cont_lock);
881
		return NULL;
882 883 884
	}

	p = kernfs_pr_cont_buf;
885 886 887 888 889 890 891

	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

892
	spin_unlock_irq(&kernfs_pr_cont_lock);
893

894 895 896
	return parent;
}

897
/**
898 899
 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
 * @parent: kernfs_node to search under
900 901 902
 * @name: name to look for
 * @ns: the namespace tag to use
 *
903
 * Look for kernfs_node with name @name under @parent and get a reference
904
 * if found.  This function may sleep and returns pointer to the found
905
 * kernfs_node on success, %NULL on failure.
906
 */
907 908
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
909
{
910
	struct kernfs_node *kn;
911

912
	mutex_lock(&kernfs_mutex);
913 914
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
915
	mutex_unlock(&kernfs_mutex);
916

917
	return kn;
918 919 920
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);

921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943
/**
 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
 * @parent: kernfs_node to search under
 * @path: path to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with path @path under @parent and get a reference
 * if found.  This function may sleep and returns pointer to the found
 * kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns)
{
	struct kernfs_node *kn;

	mutex_lock(&kernfs_mutex);
	kn = kernfs_walk_ns(parent, path, ns);
	kernfs_get(kn);
	mutex_unlock(&kernfs_mutex);

	return kn;
}

944 945
/**
 * kernfs_create_root - create a new kernfs hierarchy
946
 * @scops: optional syscall operations for the hierarchy
947
 * @flags: KERNFS_ROOT_* flags
948 949 950 951 952
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
953
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
954
				       unsigned int flags, void *priv)
955 956
{
	struct kernfs_root *root;
957
	struct kernfs_node *kn;
958 959 960 961 962

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

963
	idr_init(&root->ino_idr);
964
	INIT_LIST_HEAD(&root->supers);
965 966 967 968 969 970 971 972 973 974 975

	/*
	 * On 64bit ino setups, id is ino.  On 32bit, low 32bits are ino.
	 * High bits generation.  The starting value for both ino and
	 * genenration is 1.  Initialize upper 32bit allocation
	 * accordingly.
	 */
	if (sizeof(ino_t) >= sizeof(u64))
		root->id_highbits = 0;
	else
		root->id_highbits = 1;
976

977
	kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
978
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
979
			       KERNFS_DIR);
980
	if (!kn) {
981
		idr_destroy(&root->ino_idr);
982 983 984 985
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

986
	kn->priv = priv;
987
	kn->dir.root = root;
988

989
	root->syscall_ops = scops;
990
	root->flags = flags;
991
	root->kn = kn;
992
	init_waitqueue_head(&root->deactivate_waitq);
993

994 995 996
	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008
	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
1009
	kernfs_remove(root->kn);	/* will also free @root */
1010 1011
}

1012 1013 1014 1015
/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
1016
 * @mode: mode of the new directory
1017 1018
 * @uid: uid of the new directory
 * @gid: gid of the new directory
1019 1020 1021 1022 1023
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
 */
1024
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
1025
					 const char *name, umode_t mode,
1026
					 kuid_t uid, kgid_t gid,
1027
					 void *priv, const void *ns)
1028
{
1029
	struct kernfs_node *kn;
1030 1031 1032
	int rc;

	/* allocate */
1033 1034
	kn = kernfs_new_node(parent, name, mode | S_IFDIR,
			     uid, gid, KERNFS_DIR);
1035
	if (!kn)
1036 1037
		return ERR_PTR(-ENOMEM);

1038 1039
	kn->dir.root = parent->dir.root;
	kn->ns = ns;
1040
	kn->priv = priv;
1041 1042

	/* link in */
T
Tejun Heo 已提交
1043
	rc = kernfs_add_one(kn);
1044
	if (!rc)
1045
		return kn;
1046

1047
	kernfs_put(kn);
1048 1049 1050
	return ERR_PTR(rc);
}

1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
/**
 * kernfs_create_empty_dir - create an always empty directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
 */
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
					    const char *name)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
1065 1066
	kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
			     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->flags |= KERNFS_EMPTY_DIR;
	kn->dir.root = parent->dir.root;
	kn->ns = NULL;
	kn->priv = NULL;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}

1084 1085 1086
static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
1087
{
T
Tejun Heo 已提交
1088
	struct dentry *ret;
S
Shaohua Li 已提交
1089
	struct kernfs_node *parent = dir->i_private;
1090
	struct kernfs_node *kn;
1091 1092 1093
	struct inode *inode;
	const void *ns = NULL;

1094
	mutex_lock(&kernfs_mutex);
1095

1096
	if (kernfs_ns_enabled(parent))
1097
		ns = kernfs_info(dir->i_sb)->ns;
1098

1099
	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
1100 1101

	/* no such entry */
1102
	if (!kn || !kernfs_active(kn)) {
T
Tejun Heo 已提交
1103
		ret = NULL;
1104 1105 1106 1107
		goto out_unlock;
	}

	/* attach dentry and inode */
1108
	inode = kernfs_get_inode(dir->i_sb, kn);
1109 1110 1111 1112 1113 1114
	if (!inode) {
		ret = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	/* instantiate and hash dentry */
1115
	ret = d_splice_alias(inode, dentry);
1116
 out_unlock:
1117
	mutex_unlock(&kernfs_mutex);
1118 1119 1120
	return ret;
}

T
Tejun Heo 已提交
1121 1122 1123 1124
static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
			    umode_t mode)
{
	struct kernfs_node *parent = dir->i_private;
1125
	struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
1126
	int ret;
T
Tejun Heo 已提交
1127

1128
	if (!scops || !scops->mkdir)
T
Tejun Heo 已提交
1129 1130
		return -EPERM;

1131 1132 1133
	if (!kernfs_get_active(parent))
		return -ENODEV;

1134
	ret = scops->mkdir(parent, dentry->d_name.name, mode);
1135 1136 1137

	kernfs_put_active(parent);
	return ret;
T
Tejun Heo 已提交
1138 1139 1140 1141
}

static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
{
S
Shaohua Li 已提交
1142
	struct kernfs_node *kn  = kernfs_dentry_node(dentry);
1143
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1144
	int ret;
T
Tejun Heo 已提交
1145

1146
	if (!scops || !scops->rmdir)
T
Tejun Heo 已提交
1147 1148
		return -EPERM;

1149 1150 1151
	if (!kernfs_get_active(kn))
		return -ENODEV;

1152
	ret = scops->rmdir(kn);
1153 1154 1155

	kernfs_put_active(kn);
	return ret;
T
Tejun Heo 已提交
1156 1157 1158
}

static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
1159 1160
			     struct inode *new_dir, struct dentry *new_dentry,
			     unsigned int flags)
T
Tejun Heo 已提交
1161
{
S
Shaohua Li 已提交
1162
	struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
T
Tejun Heo 已提交
1163
	struct kernfs_node *new_parent = new_dir->i_private;
1164
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1165
	int ret;
T
Tejun Heo 已提交
1166

1167 1168 1169
	if (flags)
		return -EINVAL;

1170
	if (!scops || !scops->rename)
T
Tejun Heo 已提交
1171 1172
		return -EPERM;

1173 1174 1175 1176 1177 1178 1179 1180
	if (!kernfs_get_active(kn))
		return -ENODEV;

	if (!kernfs_get_active(new_parent)) {
		kernfs_put_active(kn);
		return -ENODEV;
	}

1181
	ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
1182 1183 1184 1185

	kernfs_put_active(new_parent);
	kernfs_put_active(kn);
	return ret;
T
Tejun Heo 已提交
1186 1187
}

/* inode operations common to all kernfs directories */
const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,

	/* directory-mutating ops forward to the root's syscall_ops */
	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};

1200
static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
1201
{
1202
	struct kernfs_node *last;
1203 1204 1205 1206 1207 1208

	while (true) {
		struct rb_node *rbn;

		last = pos;

T
Tejun Heo 已提交
1209
		if (kernfs_type(pos) != KERNFS_DIR)
1210 1211
			break;

1212
		rbn = rb_first(&pos->dir.children);
1213 1214 1215
		if (!rbn)
			break;

1216
		pos = rb_to_kn(rbn);
1217 1218 1219 1220 1221 1222
	}

	return last;
}

/**
1223
 * kernfs_next_descendant_post - find the next descendant for post-order walk
1224
 * @pos: the current position (%NULL to initiate traversal)
1225
 * @root: kernfs_node whose descendants to walk
1226 1227 1228 1229 1230
 *
 * Find the next descendant to visit for post-order traversal of @root's
 * descendants.  @root is included in the iteration and the last node to be
 * visited.
 */
1231 1232
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
						       struct kernfs_node *root)
1233 1234 1235
{
	struct rb_node *rbn;

1236
	lockdep_assert_held(&kernfs_mutex);
1237 1238 1239

	/* if first iteration, visit leftmost descendant which may be root */
	if (!pos)
1240
		return kernfs_leftmost_descendant(root);
1241 1242 1243 1244 1245 1246

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
1247
	rbn = rb_next(&pos->rb);
1248
	if (rbn)
1249
		return kernfs_leftmost_descendant(rb_to_kn(rbn));
1250 1251

	/* no sibling left, visit parent */
1252
	return pos->parent;
1253 1254
}

/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
 * needs to be explicitly activated.  A node which hasn't been activated
 * isn't visible to userland and deactivation is skipped during its
 * removal.  This is useful to construct atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
 *
 * The caller is responsible for ensuring that this function is not called
 * after kernfs_remove*() is invoked on @kn.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	mutex_lock(&kernfs_mutex);

	/* post-order walk covers every descendant including @kn itself */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		/* skip nodes which were activated by an earlier call */
		if (pos->flags & KERNFS_ACTIVATED)
			continue;

		/* must be linked in and still carrying the creation bias */
		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);

		/* lift the deactivation bias - node becomes usable */
		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
		pos->flags |= KERNFS_ACTIVATED;
	}

	mutex_unlock(&kernfs_mutex);
}

/*
 * Remove @kn and all of its descendants.  Caller must hold kernfs_mutex;
 * note that kernfs_drain() may drop and reacquire it internally.
 */
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/*
	 * Deactivate and unlink the subtree node-by-node.  Always taking
	 * the leftmost descendant guarantees children are removed before
	 * their parent.
	 */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated.  This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			/* drop the base ref on behalf of the unlinked node */
			kernfs_put(pos);
		}

		/* drop the temporary ref taken above */
		kernfs_put(pos);
	} while (pos != kn);
}

/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	/* all removal work happens under kernfs_mutex */
	mutex_lock(&kernfs_mutex);
	__kernfs_remove(kn);
	mutex_unlock(&kernfs_mutex);
}

/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}

/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already or be in the process of
 * being removed.  Once kernfs_break_active_protection() is invoked, that
 * protection is irreversibly gone for the kernfs operation instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	/* re-acquire the lockdep annotation dropped by the matching break */
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}

/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex.  The ones which lost arbitration
	 * wait for SUICIDED && drained which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			/* drop the mutex while sleeping; winner needs it */
			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}

1499
/**
1500 1501 1502 1503
 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
 * @parent: parent of the target
 * @name: name of the kernfs_node to remove
 * @ns: namespace tag of the kernfs_node to remove
1504
 *
1505 1506
 * Look for the kernfs_node with @name and @ns under @parent and remove it.
 * Returns 0 on success, -ENOENT if such entry doesn't exist.
1507
 */
1508
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
1509 1510
			     const void *ns)
{
1511
	struct kernfs_node *kn;
1512

1513
	if (!parent) {
1514
		WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
1515 1516 1517 1518
			name);
		return -ENOENT;
	}

T
Tejun Heo 已提交
1519
	mutex_lock(&kernfs_mutex);
1520

1521
	kn = kernfs_find_ns(parent, name, ns);
1522 1523
	if (kn) {
		kernfs_get(kn);
T
Tejun Heo 已提交
1524
		__kernfs_remove(kn);
1525 1526
		kernfs_put(kn);
	}
1527

T
Tejun Heo 已提交
1528
	mutex_unlock(&kernfs_mutex);
1529

1530
	if (kn)
1531 1532 1533 1534 1535 1536 1537
		return 0;
	else
		return -ENOENT;
}

/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success, -errno on failure.
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	/* both @kn and the destination must be alive and accepting children */
	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	/* rehash under the new name/ns and relink under the new parent */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	/* release the old parent's ref and the old name, if replaced */
	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}

/* Relationship between s_mode and the DT_xxx types */
1613
static inline unsigned char dt_type(struct kernfs_node *kn)
1614
{
1615
	return (kn->mode >> 12) & 15;
1616 1617
}

/* release a directory file: drop the readdir position cached in private_data */
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}

/*
 * Find the node to resume readdir from.  If the cached @pos is still
 * valid (active, same parent, hash matching the @hash cookie) reuse it;
 * otherwise look up the node for @hash in the children rbtree.  In either
 * case, skip forward past inactive nodes and nodes belonging to a
 * namespace other than @ns.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		/* revalidate the node cached by the previous readdir call */
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		/* cookies <= 1 are consumed by dir_emit_dots() */
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}

1658
static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1659
	struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1660
{
1661
	pos = kernfs_dir_pos(ns, parent, ino, pos);
1662
	if (pos) {
1663
		do {
1664
			struct rb_node *node = rb_next(&pos->rb);
1665 1666 1667
			if (!node)
				pos = NULL;
			else
1668
				pos = rb_to_kn(node);
1669 1670
		} while (pos && (!kernfs_active(pos) || pos->ns != ns));
	}
1671 1672 1673
	return pos;
}

/* ->iterate_shared() for kernfs directories */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = kernfs_dentry_node(dentry);
	struct kernfs_node *pos = file->private_data;	/* node cached by previous call */
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	/* ctx->pos carries the name hash of the next node to emit */
	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = kernfs_ino(pos);

		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		/* drop the mutex across dir_emit(); @pos stays pinned by the ref above */
		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;	/* terminal position - iteration complete */
	return 0;
}

/* file operations for kernfs directories */
const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};