locks.c 78.2 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15
 *
L
Linus Torvalds 已提交
16 17 18
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
19
 *
L
Linus Torvalds 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
45
 *  unlocked).
L
Linus Torvalds 已提交
46 47 48 49 50 51 52 53
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
54
 *  races. Just grep for FIXME to see them.
L
Linus Torvalds 已提交
55 56 57 58 59 60 61 62 63
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
P
Paul Bolle 已提交
64
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
L
Linus Torvalds 已提交
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and other must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock..
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above).  This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3).  If it doesn't, it must
 * conflict with some applied lock.  We remove (wake up) all of its children
 * (2), and add it is a new leaf to the tree rooted in the applied
 * lock (1).  We then repeat the process recursively with those
 * children.
 *
L
Linus Torvalds 已提交
156 157 158 159
 */

#include <linux/capability.h>
#include <linux/file.h>
A
Al Viro 已提交
160
#include <linux/fdtable.h>
L
Linus Torvalds 已提交
161 162 163 164 165 166
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
167
#include <linux/rcupdate.h>
168
#include <linux/pid_namespace.h>
169
#include <linux/hashtable.h>
170
#include <linux/percpu.h>
L
Linus Torvalds 已提交
171

172 173 174
#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

175
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
176 177 178

/* Predicates classifying a lock by its fl_flags personality bits. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
/* A non-positive fl_pid marks a lock held by a remote owner. */
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
L
Linus Torvalds 已提交
182

J
J. Bruce Fields 已提交
183 184
/* Return true if a break of this lease is in progress (unlock or
 * downgrade pending). */
static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

/* The lease type this lease will end up as once any pending break
 * completes: F_UNLCK / F_RDLCK for a pending unlock / downgrade,
 * otherwise the current type. */
static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

L
Linus Torvalds 已提交
197 198 199
int leases_enable = 1;
int lease_break_time = 45;

200
/*
201
 * The global file_lock_list is only used for displaying /proc/locks, so we
202 203 204 205 206
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
207
 */
208 209 210 211 212
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
213
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
214

215

216
/*
217
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
218
 * It is protected by blocked_lock_lock.
219 220 221 222 223 224 225
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
226
 */
227 228
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
229

230
/*
231 232
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
233
 *
234 235 236
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
237 238
 *
 * Note that when we acquire this lock in order to change the above fields,
239
 * we often hold the flc_lock as well. In certain cases, when reading the fields
240
 * protected by this lock, we can skip acquiring it iff we already hold the
241
 * flc_lock.
242
 */
243
static DEFINE_SPINLOCK(blocked_lock_lock);
L
Linus Torvalds 已提交
244

245
/* Slab caches for file_lock_context and file_lock allocations. */
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
L
Linus Torvalds 已提交
247

248
static struct file_lock_context *
249
locks_get_lock_context(struct inode *inode, int type)
250
{
251
	struct file_lock_context *ctx;
252

253 254 255
	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
256 257
		goto out;

258 259
	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
260 261
		goto out;

262 263 264 265
	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);
266 267 268 269 270

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
271 272 273 274
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
275
out:
276
	trace_locks_get_lock_context(inode, type, ctx);
277
	return ctx;
278 279
}

280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306
/* Warn about every lock remaining on @list; used when leaks are detected. */
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

/* Sanity check run before freeing an inode's lock context: all three lock
 * lists should be empty by now; warn and dump any stragglers. */
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
/* Warn about any lock on @list still owned by @filp (leak detection at
 * file close time). */
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
				char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

323
void
324
locks_free_lock_context(struct inode *inode)
325
{
326 327
	struct file_lock_context *ctx = inode->i_flctx;

328 329
	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
330 331 332 333
		kmem_cache_free(flctx_cache, ctx);
	}
}

M
Miklos Szeredi 已提交
334
static void locks_init_lock_heads(struct file_lock *fl)
M
Miklos Szeredi 已提交
335
{
336
	INIT_HLIST_NODE(&fl->fl_link);
337
	INIT_LIST_HEAD(&fl->fl_list);
338 339
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
M
Miklos Szeredi 已提交
340
	init_waitqueue_head(&fl->fl_wait);
M
Miklos Szeredi 已提交
341 342
}

L
Linus Torvalds 已提交
343
/* Allocate an empty lock structure. */
344
struct file_lock *locks_alloc_lock(void)
L
Linus Torvalds 已提交
345
{
M
Miklos Szeredi 已提交
346
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
M
Miklos Szeredi 已提交
347 348

	if (fl)
M
Miklos Szeredi 已提交
349
		locks_init_lock_heads(fl);
M
Miklos Szeredi 已提交
350 351

	return fl;
L
Linus Torvalds 已提交
352
}
353
EXPORT_SYMBOL_GPL(locks_alloc_lock);
L
Linus Torvalds 已提交
354

355
void locks_release_private(struct file_lock *fl)
356
{
357 358 359 360 361 362
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

363 364 365 366 367 368
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

369
	if (fl->fl_lmops) {
370 371 372 373
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
374 375
		fl->fl_lmops = NULL;
	}
376
}
377
EXPORT_SYMBOL_GPL(locks_release_private);
378

L
Linus Torvalds 已提交
379
/* Free a lock which is not in use. */
380
void locks_free_lock(struct file_lock *fl)
L
Linus Torvalds 已提交
381
{
382
	locks_release_private(fl);
L
Linus Torvalds 已提交
383 384
	kmem_cache_free(filelock_cache, fl);
}
385
EXPORT_SYMBOL(locks_free_lock);
L
Linus Torvalds 已提交
386

387 388 389 390 391 392
static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
393 394
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
395 396 397 398
		locks_free_lock(fl);
	}
}

L
Linus Torvalds 已提交
399 400
void locks_init_lock(struct file_lock *fl)
{
M
Miklos Szeredi 已提交
401 402
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
L
Linus Torvalds 已提交
403 404 405 406 407 408
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
409
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
L
Linus Torvalds 已提交
410 411 412
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
413
	new->fl_file = NULL;
L
Linus Torvalds 已提交
414 415 416 417
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
418
	new->fl_lmops = fl->fl_lmops;
419
	new->fl_ops = NULL;
420 421 422

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
423
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
424
	}
425
}
426
EXPORT_SYMBOL(locks_copy_conflock);
427 428 429

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
430 431
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);
432

433
	locks_copy_conflock(new, fl);
434

435
	new->fl_file = fl->fl_file;
L
Linus Torvalds 已提交
436
	new->fl_ops = fl->fl_ops;
437

438 439 440 441
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
L
Linus Torvalds 已提交
442 443 444
}
EXPORT_SYMBOL(locks_copy_lock);

445 446 447 448 449 450 451 452 453 454 455 456 457
static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
458
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
459 460 461 462
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

L
Linus Torvalds 已提交
463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
/* Translate a flock(2) command into an internal F_* lock type, or
 * -EINVAL for an unrecognized command. */
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
478
static struct file_lock *
479
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
L
Linus Torvalds 已提交
480 481
{
	int type = flock_translate_cmd(cmd);
482

L
Linus Torvalds 已提交
483
	if (type < 0)
484
		return ERR_PTR(type);
485

486 487 488 489 490 491 492
	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}
L
Linus Torvalds 已提交
493 494

	fl->fl_file = filp;
495
	fl->fl_owner = filp;
L
Linus Torvalds 已提交
496 497 498 499
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;
500

501
	return fl;
L
Linus Torvalds 已提交
502 503
}

504
static int assign_type(struct file_lock *fl, long type)
L
Linus Torvalds 已提交
505 506 507 508 509 510 511 512 513 514 515 516 517
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

518 519
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
L
Linus Torvalds 已提交
520 521
{
	switch (l->l_whence) {
522
	case SEEK_SET:
523
		fl->fl_start = 0;
L
Linus Torvalds 已提交
524
		break;
525
	case SEEK_CUR:
526
		fl->fl_start = filp->f_pos;
L
Linus Torvalds 已提交
527
		break;
528
	case SEEK_END:
529
		fl->fl_start = i_size_read(file_inode(filp));
L
Linus Torvalds 已提交
530 531 532 533
		break;
	default:
		return -EINVAL;
	}
534 535 536 537 538
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;
L
Linus Torvalds 已提交
539 540 541

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
542
	if (l->l_len > 0) {
543 544 545 546
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

547
	} else if (l->l_len < 0) {
548
		if (fl->fl_start + l->l_len < 0)
549
			return -EINVAL;
550 551 552 553 554
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

L
Linus Torvalds 已提交
555 556 557 558 559 560 561 562 563 564
	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

565 566 567 568 569
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
L
Linus Torvalds 已提交
570
{
571 572 573 574 575 576 577 578
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
L
Linus Torvalds 已提交
579 580 581
}

/* default lease lock manager operations */
J
Jeff Layton 已提交
582 583
static bool
lease_break_callback(struct file_lock *fl)
L
Linus Torvalds 已提交
584 585
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
J
Jeff Layton 已提交
586
	return false;
L
Linus Torvalds 已提交
587 588
}

589 590 591 592 593 594 595 596 597 598 599 600 601 602
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

603
	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
604 605
}

606
static const struct lock_manager_operations lease_manager_ops = {
J
J. Bruce Fields 已提交
607 608
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
609
	.lm_setup = lease_setup,
L
Linus Torvalds 已提交
610 611 612 613 614
};

/*
 * Initialize a lease, use the default lock manager operations
 */
615
static int lease_init(struct file *filp, long type, struct file_lock *fl)
616
{
617 618 619
	if (assign_type(fl, type) != 0)
		return -EINVAL;

620
	fl->fl_owner = filp;
L
Linus Torvalds 已提交
621 622 623 624 625 626 627 628 629 630 631 632
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
633
static struct file_lock *lease_alloc(struct file *filp, long type)
L
Linus Torvalds 已提交
634 635
{
	struct file_lock *fl = locks_alloc_lock();
636
	int error = -ENOMEM;
L
Linus Torvalds 已提交
637 638

	if (fl == NULL)
J
J. Bruce Fields 已提交
639
		return ERR_PTR(error);
L
Linus Torvalds 已提交
640 641

	error = lease_init(filp, type, fl);
642 643
	if (error) {
		locks_free_lock(fl);
J
J. Bruce Fields 已提交
644
		return ERR_PTR(error);
645
	}
J
J. Bruce Fields 已提交
646
	return fl;
L
Linus Torvalds 已提交
647 648 649 650 651 652 653 654 655 656 657 658 659
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
660
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
L
Linus Torvalds 已提交
661 662 663 664
{
	return fl1->fl_owner == fl2->fl_owner;
}

665
/* Must be called with the flc_lock held! */
666
static void locks_insert_global_locks(struct file_lock *fl)
667
{
668 669
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

670 671
	percpu_rwsem_assert_held(&file_rwsem);

672
	spin_lock(&fll->lock);
673
	fl->fl_link_cpu = smp_processor_id();
674 675
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
676 677
}

678
/* Must be called with the flc_lock held! */
679
static void locks_delete_global_locks(struct file_lock *fl)
680
{
681 682
	struct file_lock_list_struct *fll;

683 684
	percpu_rwsem_assert_held(&file_rwsem);

685 686
	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
687
	 * is done while holding the flc_lock, and new insertions into the list
688 689 690 691
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;
692 693 694

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
695
	hlist_del_init(&fl->fl_link);
696
	spin_unlock(&fll->lock);
697 698
}

699 700 701 702 703 704
/* Hash key for the blocked_hash: the lock owner's pointer value. */
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}

705
static void locks_insert_global_blocked(struct file_lock *waiter)
706
{
707 708
	lockdep_assert_held(&blocked_lock_lock);

709
	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
710 711
}

712
static void locks_delete_global_blocked(struct file_lock *waiter)
713
{
714 715
	lockdep_assert_held(&blocked_lock_lock);

716
	hash_del(&waiter->fl_link);
717 718
}

L
Linus Torvalds 已提交
719 720
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
721
 *
722
 * Must be called with blocked_lock_lock held.
L
Linus Torvalds 已提交
723
 */
724
static void __locks_delete_block(struct file_lock *waiter)
L
Linus Torvalds 已提交
725
{
726
	locks_delete_global_blocked(waiter);
727 728
	list_del_init(&waiter->fl_blocked_member);
	waiter->fl_blocker = NULL;
L
Linus Torvalds 已提交
729 730
}

731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
/* Detach and wake every request blocked on @blocker.
 * Must be called with blocked_lock_lock held. */
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		/* Lock managers get a callback instead of a waitqueue wakeup. */
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

746 747 748 749 750 751 752
/**
 *	locks_delete_lock - stop waiting for a file lock
 *	@waiter: the lock which was waiting
 *
 *	lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
L
Linus Torvalds 已提交
753
{
754 755
	int status = -ENOENT;

756
	spin_lock(&blocked_lock_lock);
757 758
	if (waiter->fl_blocker)
		status = 0;
759
	__locks_wake_up_blocks(waiter);
L
Linus Torvalds 已提交
760
	__locks_delete_block(waiter);
761
	spin_unlock(&blocked_lock_lock);
762
	return status;
L
Linus Torvalds 已提交
763
}
764
EXPORT_SYMBOL(locks_delete_block);
L
Linus Torvalds 已提交
765 766 767 768 769

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
770
 *
771
 * Must be called with both the flc_lock and blocked_lock_lock held. The
772 773 774 775
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
776 777 778 779
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
L
Linus Torvalds 已提交
780
 */
781
static void __locks_insert_block(struct file_lock *blocker,
782 783 784
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
L
Linus Torvalds 已提交
785
{
786
	struct file_lock *fl;
787
	BUG_ON(!list_empty(&waiter->fl_blocked_member));
788 789 790 791 792 793 794

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker =  fl;
			goto new_blocker;
		}
795 796
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
797
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
798
		locks_insert_global_blocked(waiter);
799 800 801 802 803 804

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it.  So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
805 806
}

807
/* Must be called with flc_lock held. */
808
static void locks_insert_block(struct file_lock *blocker,
809 810 811
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
812
{
813
	spin_lock(&blocked_lock_lock);
814
	__locks_insert_block(blocker, waiter, conflict);
815
	spin_unlock(&blocked_lock_lock);
L
Linus Torvalds 已提交
816 817
}

818 819 820
/*
 * Wake up processes blocked waiting for blocker.
 *
821
 * Must be called with the inode->flc_lock held!
L
Linus Torvalds 已提交
822 823 824
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
825 826
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
827
	 * blocked requests are only added to the list under the flc_lock, and
828 829 830
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
831
	 */
832
	if (list_empty(&blocker->fl_blocked_requests))
833 834
		return;

835
	spin_lock(&blocked_lock_lock);
836
	__locks_wake_up_blocks(blocker);
837
	spin_unlock(&blocked_lock_lock);
L
Linus Torvalds 已提交
838 839
}

840
static void
841
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
842 843 844 845 846
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

847
static void
848
locks_unlink_lock_ctx(struct file_lock *fl)
L
Linus Torvalds 已提交
849
{
850
	locks_delete_global_locks(fl);
851
	list_del_init(&fl->fl_list);
L
Linus Torvalds 已提交
852
	locks_wake_up_blocks(fl);
853 854
}

855
static void
856
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
857
{
858
	locks_unlink_lock_ctx(fl);
859
	if (dispose)
860
		list_add(&fl->fl_list, dispose);
861 862
	else
		locks_free_lock(fl);
L
Linus Torvalds 已提交
863 864 865 866 867
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
868 869
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
L
Linus Torvalds 已提交
870 871
{
	if (sys_fl->fl_type == F_WRLCK)
872
		return true;
L
Linus Torvalds 已提交
873
	if (caller_fl->fl_type == F_WRLCK)
874 875
		return true;
	return false;
L
Linus Torvalds 已提交
876 877 878 879 880
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
881 882
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
L
Linus Torvalds 已提交
883 884 885 886
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
887
	if (posix_same_owner(caller_fl, sys_fl))
888
		return false;
L
Linus Torvalds 已提交
889 890 891

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
892
		return false;
L
Linus Torvalds 已提交
893

894
	return locks_conflict(caller_fl, sys_fl);
L
Linus Torvalds 已提交
895 896 897 898 899
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
900 901
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
L
Linus Torvalds 已提交
902 903 904 905
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
906
	if (caller_fl->fl_file == sys_fl->fl_file)
907
		return false;
L
Linus Torvalds 已提交
908
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
909
		return false;
L
Linus Torvalds 已提交
910

911
	return locks_conflict(caller_fl, sys_fl);
L
Linus Torvalds 已提交
912 913
}

914
void
915
posix_test_lock(struct file *filp, struct file_lock *fl)
L
Linus Torvalds 已提交
916 917
{
	struct file_lock *cfl;
918
	struct file_lock_context *ctx;
919
	struct inode *inode = locks_inode(filp);
L
Linus Torvalds 已提交
920

921
	ctx = smp_load_acquire(&inode->i_flctx);
922 923 924 925 926
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

927
	spin_lock(&ctx->flc_lock);
928 929 930 931 932
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
L
Linus Torvalds 已提交
933
	}
934 935
	fl->fl_type = F_UNLCK;
out:
936
	spin_unlock(&ctx->flc_lock);
937
	return;
L
Linus Torvalds 已提交
938 939 940
}
EXPORT_SYMBOL(posix_test_lock);

941 942 943 944 945
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
L
Linus Torvalds 已提交
946
 *
947 948 949 950 951 952 953
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turns may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
L
Linus Torvalds 已提交
954
 *
955 956 957
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
958
 *
959 960 961 962
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
963
 *
964
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
965 966 967 968
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
969
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
970 971
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
L
Linus Torvalds 已提交
972
 */
973 974 975

#define MAX_DEADLK_ITERATIONS 10

976 977 978 979 980
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

981
	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
982 983 984 985 986
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
987 988 989 990
	}
	return NULL;
}

991
/* Must be called with the blocked_lock_lock held! */
992
static int posix_locks_deadlock(struct file_lock *caller_fl,
L
Linus Torvalds 已提交
993 994
				struct file_lock *block_fl)
{
995
	int i = 0;
L
Linus Torvalds 已提交
996

997 998
	lockdep_assert_held(&blocked_lock_lock);

999 1000
	/*
	 * This deadlock detector can't reasonably detect deadlocks with
1001
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
1002
	 */
1003
	if (IS_OFDLCK(caller_fl))
1004 1005
		return 0;

1006 1007 1008 1009 1010
	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
L
Linus Torvalds 已提交
1011 1012 1013 1014 1015
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1016
 * after any leases, but before any posix locks.
1017 1018 1019 1020
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
L
Linus Torvalds 已提交
1021
 */
1022
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
L
Linus Torvalds 已提交
1023
{
1024
	struct file_lock *new_fl = NULL;
1025 1026
	struct file_lock *fl;
	struct file_lock_context *ctx;
L
Linus Torvalds 已提交
1027
	int error = 0;
1028
	bool found = false;
1029
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
1030

1031 1032 1033 1034 1035 1036
	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}
1037

1038
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1039
		new_fl = locks_alloc_lock();
1040 1041
		if (!new_fl)
			return -ENOMEM;
1042 1043
	}

1044
	percpu_down_read(&file_rwsem);
1045
	spin_lock(&ctx->flc_lock);
1046 1047 1048
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

1049
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1050
		if (request->fl_file != fl->fl_file)
L
Linus Torvalds 已提交
1051
			continue;
1052
		if (request->fl_type == fl->fl_type)
L
Linus Torvalds 已提交
1053
			goto out;
1054
		found = true;
1055
		locks_delete_lock_ctx(fl, &dispose);
L
Linus Torvalds 已提交
1056 1057 1058
		break;
	}

1059 1060 1061
	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
1062
		goto out;
1063
	}
L
Linus Torvalds 已提交
1064

1065
find_conflict:
1066
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1067
		if (!flock_locks_conflict(request, fl))
L
Linus Torvalds 已提交
1068 1069
			continue;
		error = -EAGAIN;
1070 1071 1072
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
1073
		locks_insert_block(fl, request, flock_locks_conflict);
L
Linus Torvalds 已提交
1074 1075
		goto out;
	}
1076 1077
	if (request->fl_flags & FL_ACCESS)
		goto out;
1078
	locks_copy_lock(new_fl, request);
1079
	locks_move_blocks(new_fl, request);
1080
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1081
	new_fl = NULL;
1082
	error = 0;
L
Linus Torvalds 已提交
1083 1084

out:
1085
	spin_unlock(&ctx->flc_lock);
1086
	percpu_up_read(&file_rwsem);
1087 1088
	if (new_fl)
		locks_free_lock(new_fl);
1089
	locks_dispose_list(&dispose);
1090
	trace_flock_lock_inode(inode, request, error);
L
Linus Torvalds 已提交
1091 1092 1093
	return error;
}

1094 1095
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
L
Linus Torvalds 已提交
1096
{
1097
	struct file_lock *fl, *tmp;
1098 1099
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
L
Linus Torvalds 已提交
1100 1101
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
1102
	struct file_lock_context *ctx;
1103 1104
	int error;
	bool added = false;
1105
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
1106

1107
	ctx = locks_get_lock_context(inode, request->fl_type);
1108
	if (!ctx)
1109
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1110

L
Linus Torvalds 已提交
1111 1112 1113
	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
1114 1115
	 *
	 * In some cases we can be sure, that no new locks will be needed
L
Linus Torvalds 已提交
1116
	 */
1117 1118 1119 1120 1121 1122
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}
L
Linus Torvalds 已提交
1123

1124
	percpu_down_read(&file_rwsem);
1125
	spin_lock(&ctx->flc_lock);
1126 1127 1128
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
1129
	 * blocker's list of waiters and the global blocked_hash.
1130
	 */
L
Linus Torvalds 已提交
1131
	if (request->fl_type != F_UNLCK) {
1132
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
L
Linus Torvalds 已提交
1133 1134
			if (!posix_locks_conflict(request, fl))
				continue;
1135
			if (conflock)
1136
				locks_copy_conflock(conflock, fl);
L
Linus Torvalds 已提交
1137 1138 1139
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
1140 1141 1142 1143
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
L
Linus Torvalds 已提交
1144
			error = -EDEADLK;
1145
			spin_lock(&blocked_lock_lock);
1146 1147 1148 1149 1150
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
1151 1152
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
1153 1154
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
1155
			}
1156
			spin_unlock(&blocked_lock_lock);
L
Linus Torvalds 已提交
1157
			goto out;
1158 1159
		}
	}
L
Linus Torvalds 已提交
1160 1161 1162 1163 1164 1165

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

1166 1167 1168 1169
	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
L
Linus Torvalds 已提交
1170 1171
	}

1172
	/* Process locks with this owner. */
1173 1174 1175 1176 1177
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
L
Linus Torvalds 已提交
1178
		if (request->fl_type == fl->fl_type) {
O
Olaf Kirch 已提交
1179 1180 1181 1182
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
L
Linus Torvalds 已提交
1183
			if (fl->fl_end < request->fl_start - 1)
1184
				continue;
L
Linus Torvalds 已提交
1185 1186 1187
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
O
Olaf Kirch 已提交
1188
			if (fl->fl_start - 1 > request->fl_end)
L
Linus Torvalds 已提交
1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
1205
				locks_delete_lock_ctx(fl, &dispose);
L
Linus Torvalds 已提交
1206 1207 1208
				continue;
			}
			request = fl;
1209
			added = true;
1210
		} else {
L
Linus Torvalds 已提交
1211 1212 1213 1214
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
1215
				continue;
L
Linus Torvalds 已提交
1216 1217 1218
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
1219
				added = true;
L
Linus Torvalds 已提交
1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
1234
					locks_delete_lock_ctx(fl, &dispose);
L
Linus Torvalds 已提交
1235 1236
					continue;
				}
1237 1238 1239 1240 1241 1242
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
L
Linus Torvalds 已提交
1243
				 */
1244 1245 1246 1247 1248 1249
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
1250 1251
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
1252
				added = true;
L
Linus Torvalds 已提交
1253 1254 1255 1256
			}
		}
	}

1257
	/*
1258 1259 1260
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
1261 1262 1263 1264 1265
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

L
Linus Torvalds 已提交
1266 1267
	error = 0;
	if (!added) {
1268 1269 1270
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
L
Linus Torvalds 已提交
1271
			goto out;
1272
		}
1273 1274 1275 1276 1277

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
L
Linus Torvalds 已提交
1278
		locks_copy_lock(new_fl, request);
1279
		locks_move_blocks(new_fl, request);
1280
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
1281
		fl = new_fl;
L
Linus Torvalds 已提交
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
1292
			locks_insert_lock_ctx(left, &fl->fl_list);
L
Linus Torvalds 已提交
1293 1294 1295 1296 1297 1298 1299 1300 1301
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
1302
	spin_unlock(&ctx->flc_lock);
1303
	percpu_up_read(&file_rwsem);
L
Linus Torvalds 已提交
1304 1305 1306 1307 1308 1309 1310
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
1311
	locks_dispose_list(&dispose);
1312 1313
	trace_posix_lock_inode(inode, request, error);

L
Linus Torvalds 已提交
1314 1315 1316 1317 1318 1319 1320
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
L
Linus Torvalds 已提交
1337 1338

/**
1339 1340
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
L
Linus Torvalds 已提交
1341 1342
 * @fl: The lock to be applied
 *
1343
 * Apply a POSIX style lock request to an inode.
L
Linus Torvalds 已提交
1344
 */
1345
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
L
Linus Torvalds 已提交
1346 1347 1348 1349
{
	int error;
	might_sleep ();
	for (;;) {
1350
		error = posix_lock_inode(inode, fl, NULL);
1351
		if (error != FILE_LOCK_DEFERRED)
L
Linus Torvalds 已提交
1352
			break;
1353
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
1354 1355
		if (error)
			break;
L
Linus Torvalds 已提交
1356
	}
1357
	locks_delete_block(fl);
L
Linus Torvalds 已提交
1358 1359
	return error;
}
1360

1361
#ifdef CONFIG_MANDATORY_FILE_LOCKING
L
Linus Torvalds 已提交
1362 1363
/**
 * locks_mandatory_locked - Check for an active lock
1364
 * @file: the file to check
L
Linus Torvalds 已提交
1365 1366 1367 1368
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
1369
int locks_mandatory_locked(struct file *file)
L
Linus Torvalds 已提交
1370
{
1371
	int ret;
1372
	struct inode *inode = locks_inode(file);
1373
	struct file_lock_context *ctx;
L
Linus Torvalds 已提交
1374 1375
	struct file_lock *fl;

1376
	ctx = smp_load_acquire(&inode->i_flctx);
1377 1378 1379
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

L
Linus Torvalds 已提交
1380 1381 1382
	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
1383
	spin_lock(&ctx->flc_lock);
1384 1385
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1386
		if (fl->fl_owner != current->files &&
1387 1388
		    fl->fl_owner != file) {
			ret = -EAGAIN;
L
Linus Torvalds 已提交
1389
			break;
1390
		}
L
Linus Torvalds 已提交
1391
	}
1392
	spin_unlock(&ctx->flc_lock);
1393
	return ret;
L
Linus Torvalds 已提交
1394 1395 1396 1397
}

/**
 * locks_mandatory_area - Check for a conflicting lock
1398
 * @inode:	the file to check
L
Linus Torvalds 已提交
1399
 * @filp:       how the file was opened (if it was)
1400 1401 1402
 * @start:	first byte in the file to check
 * @end:	lastbyte in the file to check
 * @type:	%F_WRLCK for a write lock, else %F_RDLCK
L
Linus Torvalds 已提交
1403 1404 1405
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
1406 1407
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
L
Linus Torvalds 已提交
1408 1409 1410
{
	struct file_lock fl;
	int error;
1411
	bool sleep = false;
L
Linus Torvalds 已提交
1412 1413 1414 1415 1416 1417

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
1418
		sleep = true;
1419 1420 1421
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;
L
Linus Torvalds 已提交
1422 1423

	for (;;) {
1424
		if (filp) {
1425
			fl.fl_owner = filp;
1426
			fl.fl_flags &= ~FL_SLEEP;
1427
			error = posix_lock_inode(inode, &fl, NULL);
1428 1429 1430 1431 1432 1433 1434
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
1435
		error = posix_lock_inode(inode, &fl, NULL);
1436
		if (error != FILE_LOCK_DEFERRED)
L
Linus Torvalds 已提交
1437
			break;
1438
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
L
Linus Torvalds 已提交
1439 1440 1441 1442 1443
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
1444
			if (__mandatory_lock(inode))
L
Linus Torvalds 已提交
1445 1446 1447 1448 1449
				continue;
		}

		break;
	}
1450
	locks_delete_block(&fl);
L
Linus Torvalds 已提交
1451 1452 1453 1454

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
1455
#endif /* CONFIG_MANDATORY_FILE_LOCKING */
L
Linus Torvalds 已提交
1456

1457 1458 1459 1460 1461
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1462
		/* fall through */
1463 1464 1465 1466 1467
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

L
Linus Torvalds 已提交
1468
/* We already had a lease on this file; just change its type */
1469
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
L
Linus Torvalds 已提交
1470 1471 1472 1473 1474
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
1475
	lease_clear_pending(fl, arg);
L
Linus Torvalds 已提交
1476
	locks_wake_up_blocks(fl);
1477 1478 1479 1480 1481
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
1482 1483 1484 1485 1486
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
1487
		locks_delete_lock_ctx(fl, dispose);
1488
	}
L
Linus Torvalds 已提交
1489 1490 1491 1492
	return 0;
}
EXPORT_SYMBOL(lease_modify);

1493 1494 1495 1496 1497 1498 1499 1500
/* Has the jiffies deadline @then passed?  0 means "never expires". */
static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

1501
static void time_out_leases(struct inode *inode, struct list_head *dispose)
L
Linus Torvalds 已提交
1502
{
1503 1504
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;
L
Linus Torvalds 已提交
1505

1506
	lockdep_assert_held(&ctx->flc_lock);
1507

1508
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1509
		trace_time_out_leases(inode, fl);
1510
		if (past_time(fl->fl_downgrade_time))
1511
			lease_modify(fl, F_RDLCK, dispose);
1512
		if (past_time(fl->fl_break_time))
1513
			lease_modify(fl, F_UNLCK, dispose);
L
Linus Torvalds 已提交
1514 1515 1516
	}
}

J
J. Bruce Fields 已提交
1517 1518
/* Does @breaker (an open/truncate modelled as a lock) conflict with @lease? */
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	/* Layout leases only conflict with other layout operations. */
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	/* A delegation-only break leaves ordinary leases alone. */
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}

1536 1537 1538
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
1539
	struct file_lock_context *ctx = inode->i_flctx;
1540 1541
	struct file_lock *fl;

1542
	lockdep_assert_held(&ctx->flc_lock);
1543

1544
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1545 1546 1547 1548 1549 1550
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}

L
Linus Torvalds 已提交
1551 1552 1553
/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
J
J. Bruce Fields 已提交
1554 1555 1556 1557
 *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
L
Linus Torvalds 已提交
1558
 *
1559 1560 1561
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
L
Linus Torvalds 已提交
1562 1563
 *	specified %O_NONBLOCK to your open().
 */
J
J. Bruce Fields 已提交
1564
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
L
Linus Torvalds 已提交
1565
{
1566
	int error = 0;
1567
	struct file_lock_context *ctx;
1568
	struct file_lock *new_fl, *fl, *tmp;
L
Linus Torvalds 已提交
1569
	unsigned long break_time;
1570
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1571
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
1572

1573
	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1574 1575
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
J
J. Bruce Fields 已提交
1576
	new_fl->fl_flags = type;
L
Linus Torvalds 已提交
1577

1578
	/* typically we will check that ctx is non-NULL before calling */
1579
	ctx = smp_load_acquire(&inode->i_flctx);
1580 1581
	if (!ctx) {
		WARN_ON_ONCE(1);
1582
		goto free_lock;
1583 1584
	}

1585
	percpu_down_read(&file_rwsem);
1586
	spin_lock(&ctx->flc_lock);
L
Linus Torvalds 已提交
1587

1588
	time_out_leases(inode, &dispose);
L
Linus Torvalds 已提交
1589

1590
	if (!any_leases_conflict(inode, new_fl))
1591 1592
		goto out;

L
Linus Torvalds 已提交
1593 1594 1595 1596 1597 1598 1599
	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

1600
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
J
J. Bruce Fields 已提交
1601 1602
		if (!leases_conflict(fl, new_fl))
			continue;
1603 1604 1605 1606
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
L
Linus Torvalds 已提交
1607
			fl->fl_break_time = break_time;
1608
		} else {
1609
			if (lease_breaking(fl))
1610 1611 1612
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
L
Linus Torvalds 已提交
1613
		}
J
Jeff Layton 已提交
1614
		if (fl->fl_lmops->lm_break(fl))
1615
			locks_delete_lock_ctx(fl, &dispose);
L
Linus Torvalds 已提交
1616 1617
	}

1618
	if (list_empty(&ctx->flc_lease))
J
Jeff Layton 已提交
1619 1620
		goto out;

1621
	if (mode & O_NONBLOCK) {
1622
		trace_break_lease_noblock(inode, new_fl);
L
Linus Torvalds 已提交
1623 1624 1625 1626 1627
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
1628 1629
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
1630
	if (break_time != 0)
L
Linus Torvalds 已提交
1631
		break_time -= jiffies;
1632 1633
	if (break_time == 0)
		break_time++;
1634
	locks_insert_block(fl, new_fl, leases_conflict);
1635
	trace_break_lease_block(inode, new_fl);
1636
	spin_unlock(&ctx->flc_lock);
1637
	percpu_up_read(&file_rwsem);
1638

1639
	locks_dispose_list(&dispose);
1640
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1641
						!new_fl->fl_blocker, break_time);
1642

1643
	percpu_down_read(&file_rwsem);
1644
	spin_lock(&ctx->flc_lock);
1645
	trace_break_lease_unblock(inode, new_fl);
1646
	locks_delete_block(new_fl);
L
Linus Torvalds 已提交
1647
	if (error >= 0) {
1648 1649 1650 1651
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
1652 1653 1654 1655
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
L
Linus Torvalds 已提交
1656 1657 1658
		error = 0;
	}
out:
1659
	spin_unlock(&ctx->flc_lock);
1660
	percpu_up_read(&file_rwsem);
1661
	locks_dispose_list(&dispose);
1662
free_lock:
1663
	locks_free_lock(new_fl);
L
Linus Torvalds 已提交
1664 1665 1666 1667 1668
	return error;
}
EXPORT_SYMBOL(__break_lease);

/**
1669
 *	lease_get_mtime - update modified time of an inode with exclusive lease
L
Linus Torvalds 已提交
1670
 *	@inode: the inode
1671
 *      @time:  pointer to a timespec which contains the last modified time
L
Linus Torvalds 已提交
1672 1673 1674
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
1675
 * exclusive lease, then they could be modifying it.
L
Linus Torvalds 已提交
1676
 */
1677
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
L
Linus Torvalds 已提交
1678
{
1679
	bool has_lease = false;
1680
	struct file_lock_context *ctx;
1681
	struct file_lock *fl;
1682

1683
	ctx = smp_load_acquire(&inode->i_flctx);
1684
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1685
		spin_lock(&ctx->flc_lock);
1686 1687 1688 1689
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
1690
		spin_unlock(&ctx->flc_lock);
1691 1692 1693
	}

	if (has_lease)
1694
		*time = current_time(inode);
L
Linus Torvalds 已提交
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
1724
	struct inode *inode = locks_inode(filp);
1725
	struct file_lock_context *ctx;
L
Linus Torvalds 已提交
1726
	int type = F_UNLCK;
1727
	LIST_HEAD(dispose);
L
Linus Torvalds 已提交
1728

1729
	ctx = smp_load_acquire(&inode->i_flctx);
1730
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1731
		percpu_down_read(&file_rwsem);
1732
		spin_lock(&ctx->flc_lock);
1733
		time_out_leases(inode, &dispose);
1734 1735 1736
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
1737
			type = target_leasetype(fl);
L
Linus Torvalds 已提交
1738 1739
			break;
		}
1740
		spin_unlock(&ctx->flc_lock);
1741
		percpu_up_read(&file_rwsem);
1742

1743
		locks_dispose_list(&dispose);
L
Linus Torvalds 已提交
1744 1745 1746 1747
	}
	return type;
}

1748
/**
1749
 * check_conflicting_open - see if the given file points to an inode that has
1750 1751
 *			    an existing open that would conflict with the
 *			    desired lease.
1752
 * @filp:	file to check
1753
 * @arg:	type of lease that we're trying to acquire
1754
 * @flags:	current lock flags
1755 1756 1757 1758 1759
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
1760
check_conflicting_open(struct file *filp, const long arg, int flags)
1761
{
1762 1763
	struct inode *inode = locks_inode(filp);
	int self_wcount = 0, self_rcount = 0;
1764

C
Christoph Hellwig 已提交
1765 1766 1767
	if (flags & FL_LAYOUT)
		return 0;

1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782
	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only read/write count is from lease requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want.  (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;
1783

1784 1785 1786
	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;
1787

1788
	return 0;
1789 1790
}

1791 1792
/*
 * Install (or upgrade/downgrade) a lease/delegation on @filp.
 * Called with arg == F_RDLCK or F_WRLCK only (never F_UNLCK).
 * On success with a newly-inserted lease, *flp is consumed (set to NULL).
 */
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* Reap any already-expired leases before checking for conflicts. */
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			/* Our own existing lease: candidate for lm_change. */
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		/* Upgrade/downgrade the lease we already hold. */
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		/* Lost the race: back the lease out again. */
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/* Free timed-out/cancelled leases outside the spinlock. */
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	/* A freshly inserted lease now belongs to the context, not the caller. */
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
1906

1907
/*
 * Remove the lease on @filp owned by @owner, if any.
 * Returns -EAGAIN when no matching lease exists.
 */
static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	/* NOTE: fl == victim here (loop broke on the match), so using
	 * fl->fl_lmops below is safe only inside this if-branch. */
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/* Free the unlinked lease outside the spinlock. */
	locks_dispose_list(&dispose);
	return error;
}

/**
 *	generic_setlease	-	sets a lease on an open file
1941 1942 1943 1944 1945
 *	@filp:	file pointer
 *	@arg:	type of lease to obtain
 *	@flp:	input - file_lock to use, output - file_lock inserted
 *	@priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
1946 1947 1948 1949
 *
 *	The (input) flp->fl_lmops->lm_break function is required
 *	by break_lease().
 */
1950 1951
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
1952
{
1953
	struct inode *inode = locks_inode(filp);
1954 1955
	int error;

1956
	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1957 1958 1959 1960 1961 1962 1963 1964 1965
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
1966
		return generic_delete_lease(filp, *priv);
1967 1968
	case F_RDLCK:
	case F_WRLCK:
1969 1970 1971 1972
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}
C
Christoph Hellwig 已提交
1973

1974
		return generic_add_lease(filp, arg, flp, priv);
1975
	default:
1976
		return -EINVAL;
1977 1978
	}
}
1979
EXPORT_SYMBOL(generic_setlease);
L
Linus Torvalds 已提交
1980

1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038
#if IS_ENABLED(CONFIG_SRCU)
/*
 * Kernel subsystems can register to be notified on any attempt to set
 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
 * to close files that it may have cached when there is an attempt to set a
 * conflicting lease.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

/* Fire the chain for lease set/upgrade requests; F_UNLCK is not notified. */
static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#else /* !IS_ENABLED(CONFIG_SRCU) */
/* Without SRCU the notifier machinery degenerates to no-ops. */
static inline void
lease_notifier_chain_init(void)
{
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
}

int lease_register_notifier(struct notifier_block *nb)
{
	return 0;
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#endif /* IS_ENABLED(CONFIG_SRCU) */

2039
/**
2040
 * vfs_setlease        -       sets a lease on an open file
2041 2042 2043 2044
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
2045
 *		NULL if lm_setup doesn't require it)
2046 2047 2048
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
2049 2050
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
2051
 * stack trace).
2052 2053 2054
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
L
Linus Torvalds 已提交
2055
 */
2056 2057
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
L
Linus Torvalds 已提交
2058
{
2059 2060
	if (lease)
		setlease_notifier(arg, *lease);
2061
	if (filp->f_op->setlease)
2062
		return filp->f_op->setlease(filp, arg, lease, priv);
2063
	else
2064
		return generic_setlease(filp, arg, lease, priv);
L
Linus Torvalds 已提交
2065
}
2066
EXPORT_SYMBOL_GPL(vfs_setlease);
L
Linus Torvalds 已提交
2067

2068
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
L
Linus Torvalds 已提交
2069
{
2070
	struct file_lock *fl;
2071
	struct fasync_struct *new;
L
Linus Torvalds 已提交
2072 2073
	int error;

2074 2075 2076
	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);
L
Linus Torvalds 已提交
2077

2078 2079 2080 2081 2082
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
2083
	new->fa_fd = fd;
2084

2085
	error = vfs_setlease(filp, arg, &fl, (void **)&new);
2086 2087
	if (fl)
		locks_free_lock(fl);
2088 2089
	if (new)
		fasync_free(new);
L
Linus Torvalds 已提交
2090 2091 2092
	return error;
}

2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105
/**
 *	fcntl_setlease	-	sets a lease on an open file
 *	@fd: open file descriptor
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *
 *	Call this fcntl to establish a lease on the file.
 *	Note that you also need to call %F_SETSIG to
 *	receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
2106
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2107 2108 2109
	return do_fcntl_add_lease(fd, filp, arg);
}

L
Linus Torvalds 已提交
2110
/**
2111 2112
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
L
Linus Torvalds 已提交
2113 2114
 * @fl: The lock to be applied
 *
2115
 * Apply a FLOCK style lock request to an inode.
L
Linus Torvalds 已提交
2116
 */
2117
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
L
Linus Torvalds 已提交
2118 2119 2120 2121
{
	int error;
	might_sleep();
	for (;;) {
2122
		error = flock_lock_inode(inode, fl);
2123
		if (error != FILE_LOCK_DEFERRED)
L
Linus Torvalds 已提交
2124
			break;
2125
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
2126 2127
		if (error)
			break;
L
Linus Torvalds 已提交
2128
	}
2129
	locks_delete_block(fl);
L
Linus Torvalds 已提交
2130 2131 2132
	return error;
}

2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156
/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	unsigned int style = fl->fl_flags & (FL_POSIX | FL_FLOCK);

	if (style == FL_POSIX)
		return posix_lock_inode_wait(inode, fl);
	if (style == FL_FLOCK)
		return flock_lock_inode_wait(inode, fl);

	/* Caller must have set exactly one of FL_POSIX / FL_FLOCK. */
	BUG();
	return 0;
}
EXPORT_SYMBOL(locks_lock_inode_wait);

L
Linus Torvalds 已提交
2157 2158 2159 2160 2161 2162
/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of:
 *
 *	- %LOCK_SH -- a shared lock.
 *	- %LOCK_EX -- an exclusive lock.
 *	- %LOCK_UN -- remove an existing lock.
 *	- %LOCK_MAND -- a 'mandatory' flock.
 *	  This exists to emulate Windows Share Modes.
 *
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	/* LOCK_NB is a modifier, not a lock type: strip it after noting it. */
	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* Taking (not releasing) a lock requires a readable or writable fd,
	 * except for LOCK_MAND requests. */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd, NULL);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	/* Filesystem-specific ->flock takes precedence over the generic path. */
	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}

2222 2223 2224
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
2225
 * @fl: The lock to test; also used to hold result
2226 2227 2228 2229 2230 2231
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
2232
	if (filp->f_op->lock)
2233 2234 2235 2236 2237 2238
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254
/**
 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock who's fl_pid should be translated
 * @ns: The namespace into which the pid should be translated
 *
 * Used to tranlate a fl_pid into a namespace virtual pid number
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
2255 2256 2257 2258 2259 2260 2261
	/*
	 * If the flock owner process is dead and its pid has been already
	 * freed, the translation below won't work, but we still want to show
	 * flock owner pid number in init pidns.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;
2262 2263 2264 2265 2266 2267 2268 2269

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}

2270 2271
/*
 * Convert an internal file_lock into a userspace struct flock.
 * Returns -EOVERFLOW when the range cannot be represented in 32-bit
 * legacy flock fields.
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" (fl_end == OFFSET_MAX). */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
2294
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2295 2296 2297 2298 2299 2300 2301 2302
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

L
Linus Torvalds 已提交
2303 2304 2305
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid == 0 and are owned by the filp. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	/* F_UNLCK in fl_type means no conflicting lock was found. */
	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}

2347 2348 2349 2350 2351
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for non-blocking lock the file system should return
 * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return a
 * nonzero return code and the file system should release the lock. The file
 * system is also responsible to keep a corresponding posix lock when it
 * grants a lock so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

M
Miklos Szeredi 已提交
2389 2390 2391 2392 2393 2394 2395 2396 2397
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

2398 2399 2400
	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
M
Miklos Szeredi 已提交
2401
			break;
2402
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
2403 2404
		if (error)
			break;
M
Miklos Szeredi 已提交
2405
	}
2406
	locks_delete_block(fl);
M
Miklos Szeredi 已提交
2407 2408 2409 2410

	return error;
}

2411
/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}

L
Linus Torvalds 已提交
2427 2428 2429
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		/* OFD locks require l_pid == 0. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			/* fd was closed/replaced concurrently: undo the lock. */
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid == 0 and are owned by the filp. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	/* F_UNLCK in fl_type means no conflicting lock was found. */
	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		/* OFD locks require l_pid == 0. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			/* fd was closed/replaced concurrently: undo the lock. */
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	/* Build an on-stack F_UNLCK request covering the whole file. */
	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);

2685
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	/* Build an on-stack LOCK_UN request for this filp. */
	flock_make_lock(filp, LOCK_UN, &fl);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}

2707
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* _safe variant: lease_modify() may unlink entries onto @dispose. */
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	/* Free the unlinked leases outside the spinlock. */
	locks_dispose_list(&dispose);
}

L
Linus Torvalds 已提交
2728 2729 2730
/*
 * This function is called on the last close of an open file.
 */
2731
void locks_remove_file(struct file *filp)
L
Linus Torvalds 已提交
2732
{
2733 2734
	struct file_lock_context *ctx;

2735
	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2736
	if (!ctx)
2737 2738
		return;

2739
	/* remove any OFD locks */
2740
	locks_remove_posix(filp, filp);
2741

2742
	/* remove flock locks */
2743
	locks_remove_flock(filp, ctx);
2744

2745
	/* remove any leases */
2746
	locks_remove_lease(filp, ctx);
2747 2748 2749 2750 2751 2752

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
L
Linus Torvalds 已提交
2753 2754
}

M
Marc Eshel 已提交
2755 2756 2757 2758 2759 2760 2761 2762 2763
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
2764
	if (filp->f_op->lock)
M
Marc Eshel 已提交
2765 2766 2767 2768 2769
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);

2770
#ifdef CONFIG_PROC_FS
2771
#include <linux/proc_fs.h>
2772 2773
#include <linux/seq_file.h>

2774 2775 2776 2777 2778
/* Cursor for the /proc/locks seq_file walk over the per-CPU lock lists. */
struct locks_iterator {
	int	li_cpu;		/* CPU whose hash list is being walked */
	loff_t	li_pos;		/* current sequence position */
};

2779
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2780
			    loff_t id, char *pfx)
L
Linus Torvalds 已提交
2781 2782
{
	struct inode *inode = NULL;
2783
	unsigned int fl_pid;
2784
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2785

2786 2787
	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
2788 2789 2790
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
2791
	 */
L
Linus Torvalds 已提交
2792 2793

	if (fl->fl_file != NULL)
2794
		inode = locks_inode(fl->fl_file);
L
Linus Torvalds 已提交
2795

2796
	seq_printf(f, "%lld:%s ", id, pfx);
L
Linus Torvalds 已提交
2797
	if (IS_POSIX(fl)) {
2798
		if (fl->fl_flags & FL_ACCESS)
2799
			seq_puts(f, "ACCESS");
2800
		else if (IS_OFDLCK(fl))
2801
			seq_puts(f, "OFDLCK");
2802
		else
2803
			seq_puts(f, "POSIX ");
2804 2805

		seq_printf(f, " %s ",
L
Linus Torvalds 已提交
2806
			     (inode == NULL) ? "*NOINODE*" :
2807
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
L
Linus Torvalds 已提交
2808 2809
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
2810
			seq_puts(f, "FLOCK  MSNFS     ");
L
Linus Torvalds 已提交
2811
		} else {
2812
			seq_puts(f, "FLOCK  ADVISORY  ");
L
Linus Torvalds 已提交
2813 2814
		}
	} else if (IS_LEASE(fl)) {
2815 2816 2817 2818 2819
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

J
J. Bruce Fields 已提交
2820
		if (lease_breaking(fl))
2821
			seq_puts(f, "BREAKING  ");
L
Linus Torvalds 已提交
2822
		else if (fl->fl_file)
2823
			seq_puts(f, "ACTIVE    ");
L
Linus Torvalds 已提交
2824
		else
2825
			seq_puts(f, "BREAKER   ");
L
Linus Torvalds 已提交
2826
	} else {
2827
		seq_puts(f, "UNKNOWN UNKNOWN  ");
L
Linus Torvalds 已提交
2828 2829
	}
	if (fl->fl_type & LOCK_MAND) {
2830
		seq_printf(f, "%s ",
L
Linus Torvalds 已提交
2831 2832 2833 2834
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
2835 2836 2837 2838
		int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

		seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
				     (type == F_RDLCK) ? "READ" : "UNLCK");
L
Linus Torvalds 已提交
2839 2840
	}
	if (inode) {
2841
		/* userspace relies on this representation of dev_t */
2842
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
L
Linus Torvalds 已提交
2843 2844 2845
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
2846
		seq_printf(f, "%d <none>:0 ", fl_pid);
L
Linus Torvalds 已提交
2847 2848 2849
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
2850
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
L
Linus Torvalds 已提交
2851
		else
2852
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
L
Linus Torvalds 已提交
2853
	} else {
2854
		seq_puts(f, "0 EOF\n");
L
Linus Torvalds 已提交
2855 2856 2857
	}
}

2858
static int locks_show(struct seq_file *f, void *v)
L
Linus Torvalds 已提交
2859
{
2860
	struct locks_iterator *iter = f->private;
2861
	struct file_lock *fl, *bfl;
2862
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
L
Linus Torvalds 已提交
2863

2864
	fl = hlist_entry(v, struct file_lock, fl_link);
L
Linus Torvalds 已提交
2865

2866
	if (locks_translate_pid(fl, proc_pidns) == 0)
2867 2868
		return 0;

2869
	lock_get_status(f, fl, iter->li_pos, "");
L
Linus Torvalds 已提交
2870

2871
	list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
2872
		lock_get_status(f, bfl, iter->li_pos, " ->");
2873

2874 2875
	return 0;
}
L
Linus Torvalds 已提交
2876

2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
/*
 * Walk one lock list and print every lock that belongs to @filp and is
 * owned either by @files or (for OFD-style owners) by @filp itself,
 * bumping *@id for each line emitted.
 */
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *lock;

	list_for_each_entry(lock, head, fl_list) {
		if (lock->fl_file != filp)
			continue;
		if (lock->fl_owner != files && lock->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, lock, *id, "");
	}
}

void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
2900
	struct inode *inode = locks_inode(filp);
2901 2902 2903
	struct file_lock_context *ctx;
	int id = 0;

2904
	ctx = smp_load_acquire(&inode->i_flctx);
2905 2906 2907 2908 2909 2910 2911 2912 2913 2914
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}

2915
static void *locks_start(struct seq_file *f, loff_t *pos)
2916
	__acquires(&blocked_lock_lock)
2917
{
2918
	struct locks_iterator *iter = f->private;
2919

2920
	iter->li_pos = *pos + 1;
2921
	percpu_down_write(&file_rwsem);
2922
	spin_lock(&blocked_lock_lock);
2923
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2924
}
L
Linus Torvalds 已提交
2925

2926 2927
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
2928 2929 2930
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
2931
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2932
}
L
Linus Torvalds 已提交
2933

2934
static void locks_stop(struct seq_file *f, void *v)
2935
	__releases(&blocked_lock_lock)
2936
{
2937
	spin_unlock(&blocked_lock_lock);
2938
	percpu_up_write(&file_rwsem);
L
Linus Torvalds 已提交
2939 2940
}

2941
static const struct seq_operations locks_seq_operations = {
2942 2943 2944 2945 2946
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2947 2948 2949

/* Register /proc/locks with per-open locks_iterator private data. */
static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif

L
Linus Torvalds 已提交
2957 2958
static int __init filelock_init(void)
{
2959 2960
	int i;

2961 2962 2963
	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

L
Linus Torvalds 已提交
2964
	filelock_cache = kmem_cache_create("file_lock_cache",
M
Miklos Szeredi 已提交
2965 2966
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

2967 2968 2969 2970 2971 2972
	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}
2973

2974
	lease_notifier_chain_init();
L
Linus Torvalds 已提交
2975 2976 2977
	return 0;
}
core_initcall(filelock_init);