/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!sysfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		return ops->seq_start(sf, ppos);
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		return ops->seq_next(sf, v, ppos);
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);

	sysfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!sysfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	sysfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

 out_free:
	kfree(buf);
	return len;
}

/**
 * kernfs_file_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: buffer to read data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_file_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_file_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them. We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_file_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!sysfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	sysfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;
out_free:
	kfree(buf);
	return len;
}

static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!sysfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	sysfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!sysfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	sysfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!sysfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	sysfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!sysfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	sysfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!sysfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	sysfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!sysfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	sysfs_put_active(of->kn);
	return pol;
}

static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!sysfs_get_active(of->kn))
		return 0;

	ret = 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);

	sysfs_put_active(of->kn);
	return ret;
}
#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
	.migrate	= kernfs_vma_migrate,
#endif
};

static int kernfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_file_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!sysfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = 1;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	sysfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}

/**
 *	sysfs_get_open_dirent - get or create kernfs_open_node
 *	@kn: target kernfs_node
 *	@of: kernfs_open_file for this instance of open
 *
 *	If @kn->attr.open exists, increment its reference count; otherwise,
 *	create one.  @of is chained to the files list.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sysfs_get_open_dirent(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 *	sysfs_put_open_dirent - put kernfs_open_node
 *	@kn: target kernfs_node
 *	@of: associated kernfs_open_file
 *
 *	Put @kn->attr.open and unlink @of from the files list.  If
 *	reference count reaches zero, disassociate and free it.
 *
 *	LOCKING:
 *	None.
 */
static void sysfs_put_open_dirent(struct kernfs_node *kn,
				  struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}

static int kernfs_file_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!sysfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* check perms and supported operations */
	if ((file->f_mode & FMODE_WRITE) &&
	    (!(inode->i_mode & S_IWUGO) || !has_write))
		goto err_out;

	if ((file->f_mode & FMODE_READ) &&
	    (!(inode->i_mode & S_IRUGO) || !has_read))
		goto err_out;

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open dirent struct */
	error = sysfs_get_open_dirent(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	sysfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of);
err_out:
	sysfs_put_active(kn);
	return error;
}

static int kernfs_file_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	sysfs_put_open_dirent(kn, of);
	seq_release(inode, filp);
	kfree(of);

	return 0;
}

void sysfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	sysfs_put_open_dirent(kn, NULL);
}

/* Sysfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_file_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	/* need parent for the kobj, grab both */
	if (!sysfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	sysfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
		on = kn->attr.open;
		if (on) {
			atomic_inc(&on->event);
			wake_up_interruptible(&on->poll);
		}
	}

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
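
/*
 * Illustrative sketch, not part of this file: a kernfs user that wants the
 * poll semantics described above would call kernfs_notify() whenever the
 * value backing an attribute changes.  The helper below is hypothetical;
 * only kernfs_notify() itself is provided here.
 *
 *	static void example_value_changed(struct kernfs_node *kn)
 *	{
 *		... update whatever the file's ->seq_show() reads ...
 *		kernfs_notify(kn);	// wake poll/select waiters
 *	}
 *
 * Waiters blocked in poll(2)/select(2) on the file then see
 * POLLERR|POLLPRI and are expected to seek to 0 and reread the file.
 */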

const struct file_operations kernfs_file_operations = {
	.read		= kernfs_file_read,
	.write		= kernfs_file_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_file_mmap,
	.open		= kernfs_file_open,
	.release	= kernfs_file_release,
	.poll		= kernfs_file_poll,
};

/**
 * kernfs_create_file_ns_key - create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *kernfs_create_file_ns_key(struct kernfs_node *parent,
					      const char *name,
					      umode_t mode, loff_t size,
					      const struct kernfs_ops *ops,
					      void *priv, const void *ns,
					      struct lock_class_key *key)
{
	struct kernfs_addrm_cxt acxt;
	struct kernfs_node *kn;
	int rc;

	kn = sysfs_new_dirent(kernfs_root(parent), name,
			      (mode & S_IALLUGO) | S_IFREG, KERNFS_FILE);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	sysfs_addrm_start(&acxt);
	rc = sysfs_add_one(&acxt, kn, parent);
	sysfs_addrm_finish(&acxt);

	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
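
/*
 * Illustrative sketch, hypothetical caller, not part of this file: creating
 * a read-only file backed by a seq_show() implementation.  All "example_*"
 * names are made up; only kernfs_create_file_ns_key() is defined above.
 *
 *	static int example_seq_show(struct seq_file *sf, void *v)
 *	{
 *		seq_puts(sf, "hello\n");
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops example_ops = {
 *		.seq_show	= example_seq_show,
 *	};
 *
 *	kn = kernfs_create_file_ns_key(parent, "example", 0444, 0,
 *				       &example_ops, NULL, NULL, NULL);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 */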