// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
47
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
D
David Teigland 已提交
48 49 50 51 52 53 54
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

55
static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
D
David Teigland 已提交
56
{
57
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
D
David Teigland 已提交
58 59 60
	struct gfs2_holder i_gh;
	loff_t error;

61
	switch (whence) {
62
	case SEEK_END:
D
David Teigland 已提交
63 64 65
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
66
			error = generic_file_llseek(file, offset, whence);
D
David Teigland 已提交
67 68
			gfs2_glock_dq_uninit(&i_gh);
		}
69
		break;
70 71 72 73 74 75 76 77 78

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

79 80
	case SEEK_CUR:
	case SEEK_SET:
81 82 83 84
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
85
		error = generic_file_llseek(file, offset, whence);
86 87 88 89
		break;
	default:
		error = -EINVAL;
	}
D
David Teigland 已提交
90 91 92 93 94

	return error;
}

/**
A
Al Viro 已提交
95
 * gfs2_readdir - Iterator for a directory
D
David Teigland 已提交
96
 * @file: The directory to read from
A
Al Viro 已提交
97
 * @ctx: What to feed directory entries to
D
David Teigland 已提交
98 99 100 101
 *
 * Returns: errno
 */

A
Al Viro 已提交
102
static int gfs2_readdir(struct file *file, struct dir_context *ctx)
D
David Teigland 已提交
103
{
104
	struct inode *dir = file->f_mapping->host;
105
	struct gfs2_inode *dip = GFS2_I(dir);
D
David Teigland 已提交
106 107 108
	struct gfs2_holder d_gh;
	int error;

A
Al Viro 已提交
109 110
	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
D
David Teigland 已提交
111 112
		return error;

A
Al Viro 已提交
113
	error = gfs2_dir_read(dir, ctx, &file->f_ra);
D
David Teigland 已提交
114 115 116 117 118 119

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

120
/**
121
 * fsflag_gfs2flag
122
 *
123 124
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
125
 */
126 127 128 129 130 131 132 133 134 135 136
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
137
};
138

139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154
static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

155
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
156
{
A
Al Viro 已提交
157
	struct inode *inode = file_inode(filp);
158
	struct gfs2_inode *ip = GFS2_I(inode);
159
	struct gfs2_holder gh;
160 161
	int error;
	u32 fsflags;
162

163 164
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
165
	if (error)
166
		goto out_uninit;
167

168
	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
169

170
	if (put_user(fsflags, ptr))
171 172
		error = -EFAULT;

173
	gfs2_glock_dq(&gh);
174
out_uninit:
175 176 177 178
	gfs2_holder_uninit(&gh);
	return error;
}

179 180 181 182 183
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

S
Steven Whitehouse 已提交
184 185
	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
186
		flags |= S_NOSEC;
187
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
188
		flags |= S_IMMUTABLE;
189
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
190
		flags |= S_APPEND;
191
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
192
		flags |= S_NOATIME;
193
	if (ip->i_diskflags & GFS2_DIF_SYNC)
194 195 196 197
		flags |= S_SYNC;
	inode->i_flags = flags;
}

198 199 200 201 202 203
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
204
			     GFS2_DIF_TOPDIR|			\
205 206 207
			     GFS2_DIF_INHERIT_JDATA)

/**
208 209 210
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
211
 * @mask: Indicates which flags are valid
212
 * @fsflags: The FS_* inode flags passed in
213 214
 *
 */
215 216
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
			     const u32 fsflags)
217
{
A
Al Viro 已提交
218
	struct inode *inode = file_inode(filp);
219 220
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
221 222 223
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
224
	u32 new_flags, flags, oldflags;
225

226
	error = mnt_want_write_file(filp);
227
	if (error)
228 229
		return error;

M
Miklos Szeredi 已提交
230 231 232 233
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

234 235 236 237 238
	oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
	error = vfs_ioc_setflags_prepare(inode, oldflags, fsflags);
	if (error)
		goto out;

239
	error = -EACCES;
240
	if (!inode_owner_or_capable(inode))
241 242 243
		goto out;

	error = 0;
244
	flags = ip->i_diskflags;
245
	new_flags = (flags & ~mask) | (reqflags & mask);
246 247 248 249 250 251 252 253
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
254
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
255
	    !capable(CAP_LINUX_IMMUTABLE))
256
		goto out;
257
	if (!IS_IMMUTABLE(inode)) {
258
		error = gfs2_permission(inode, MAY_WRITE);
259 260 261
		if (error)
			goto out;
	}
262
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
263
		if (new_flags & GFS2_DIF_JDATA)
264
			gfs2_log_flush(sdp, ip->i_gl,
265 266
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
267 268 269 270 271 272
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
273 274
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
275
	}
276
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
277 278
	if (error)
		goto out;
279 280 281
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
282
	inode->i_ctime = current_time(inode);
283
	gfs2_trans_add_meta(ip->i_gl, bh);
284
	ip->i_diskflags = new_flags;
285
	gfs2_dinode_out(ip, bh->b_data);
286
	brelse(bh);
287
	gfs2_set_inode_flags(inode);
288
	gfs2_set_aops(inode);
289 290
out_trans_end:
	gfs2_trans_end(sdp);
291 292
out:
	gfs2_glock_dq_uninit(&gh);
M
Miklos Szeredi 已提交
293
out_drop_write:
A
Al Viro 已提交
294
	mnt_drop_write_file(filp);
295 296 297
	return error;
}

298
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
299
{
A
Al Viro 已提交
300
	struct inode *inode = file_inode(filp);
301 302 303
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;
304

305
	if (get_user(fsflags, ptr))
306
		return -EFAULT;
307

308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
325
	}
326

327
	return do_gfs2_set_flags(filp, gfsflags, mask, fsflags);
328 329
}

S
Steve Whitehouse 已提交
330 331 332 333 334 335 336 337 338 339 340
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

341
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
342 343
{
	switch(cmd) {
344
	case FS_IOC_GETFLAGS:
345
		return gfs2_get_flags(filp, (u32 __user *)arg);
346
	case FS_IOC_SETFLAGS:
347
		return gfs2_set_flags(filp, (u32 __user *)arg);
S
Steven Whitehouse 已提交
348 349
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
S
Steve Whitehouse 已提交
350 351
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
352
	}
S
Steve Whitehouse 已提交
353

354 355 356
	return -ENOTTY;
}

357 358
/**
 * gfs2_size_hint - Give a hint to the size of a write request
359
 * @filep: The struct file
360 361 362 363 364 365 366 367 368 369 370
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
A
Al Viro 已提交
371
	struct inode *inode = file_inode(filep);
372 373 374 375 376
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

377 378
	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
379 380
}

381
/**
382
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
383 384
 * @page: The (locked) page to allocate backing for
 *
385 386 387 388
 * We try to allocate all the blocks required for the page in one go.  This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated.  If some of the blocks are already allocated,
 * that is ok too.
389 390 391
 */
static int gfs2_allocate_page_backing(struct page *page)
{
392 393
	u64 pos = page_offset(page);
	u64 size = PAGE_SIZE;
394 395

	do {
396 397 398
		struct iomap iomap = { };

		if (gfs2_iomap_get_alloc(page->mapping->host, pos, 1, &iomap))
399
			return -EIO;
400 401 402 403 404 405

		iomap.length = min(iomap.length, size);
		size -= iomap.length;
		pos += iomap.length;
	} while (size > 0);

406 407 408 409 410 411
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
412
 * @vmf: The virtual memory fault containing the page to become writable
413 414 415 416 417
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

418
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
419
{
420
	struct page *page = vmf->page;
421
	struct inode *inode = file_inode(vmf->vma->vm_file);
422 423
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
424
	struct gfs2_alloc_parms ap = { .aflags = 0, };
425
	unsigned long last_index;
426
	u64 pos = page_offset(page);
427 428
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
S
Steven Whitehouse 已提交
429
	loff_t size;
430 431
	int ret;

432
	sb_start_pagefault(inode->i_sb);
S
Steven Whitehouse 已提交
433

434
	ret = gfs2_rsqa_alloc(ip);
435
	if (ret)
436
		goto out;
437

438
	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
B
Bob Peterson 已提交
439

440 441
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
442
	if (ret)
443
		goto out_uninit;
444

445
	/* Update file times before taking page lock */
446
	file_update_time(vmf->vma->vm_file);
447

448 449 450
	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

451
	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
S
Steven Whitehouse 已提交
452 453 454 455 456
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
457
		goto out_unlock;
S
Steven Whitehouse 已提交
458 459
	}

460 461
	ret = gfs2_rindex_update(sdp);
	if (ret)
462 463
		goto out_unlock;

464
	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
465
	ap.target = data_blocks + ind_blocks;
466 467 468
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
469
	ret = gfs2_inplace_reserve(ip, &ap);
470 471 472 473 474 475
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
476
	if (ind_blocks || data_blocks) {
477
		rblocks += RES_STATFS + RES_QUOTA;
478
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
479
	}
480 481 482 483 484 485
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
S
Steven Whitehouse 已提交
486
	size = i_size_read(inode);
487
	last_index = (size - 1) >> PAGE_SHIFT;
S
Steven Whitehouse 已提交
488 489 490 491 492 493 494 495 496 497 498 499
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
500
	ret = 0;
S
Steven Whitehouse 已提交
501
	if (gfs2_is_stuffed(ip))
502
		ret = gfs2_unstuff_dinode(ip, page);
S
Steven Whitehouse 已提交
503 504
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);
505

S
Steven Whitehouse 已提交
506 507 508
out_trans_end:
	if (ret)
		unlock_page(page);
509 510 511 512 513 514 515
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
516
out_uninit:
517
	gfs2_holder_uninit(&gh);
S
Steven Whitehouse 已提交
518 519
	if (ret == 0) {
		set_page_dirty(page);
520
		wait_for_stable_page(page);
S
Steven Whitehouse 已提交
521
	}
522
out:
523
	sb_end_pagefault(inode->i_sb);
S
Steven Whitehouse 已提交
524
	return block_page_mkwrite_return(ret);
525 526
}

527
static const struct vm_operations_struct gfs2_vm_ops = {
528
	.fault = filemap_fault,
529
	.map_pages = filemap_map_pages,
530 531 532
	.page_mkwrite = gfs2_page_mkwrite,
};

D
David Teigland 已提交
533 534 535 536 537
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
538 539 540 541 542
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
D
David Teigland 已提交
543 544 545 546
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
547
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
D
David Teigland 已提交
548

549 550
	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
551 552
		struct gfs2_holder i_gh;
		int error;
D
David Teigland 已提交
553

554 555
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
556 557
		if (error)
			return error;
558 559 560
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
561
	}
562
	vma->vm_ops = &gfs2_vm_ops;
D
David Teigland 已提交
563

564
	return 0;
D
David Teigland 已提交
565 566 567
}

/**
568 569 570
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
D
David Teigland 已提交
571
 *
572 573 574 575 576 577
 * This maybe called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
D
David Teigland 已提交
578 579
 */

580
int gfs2_open_common(struct inode *inode, struct file *file)
D
David Teigland 已提交
581 582
{
	struct gfs2_file *fp;
583 584 585 586 587 588 589
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}
D
David Teigland 已提交
590

591
	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
D
David Teigland 已提交
592 593 594
	if (!fp)
		return -ENOMEM;

595
	mutex_init(&fp->f_fl_mutex);
D
David Teigland 已提交
596

597
	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
598
	file->private_data = fp;
599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;
D
David Teigland 已提交
622

623
	if (S_ISREG(ip->i_inode.i_mode)) {
D
David Teigland 已提交
624 625 626
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
627 628 629
			return error;
		need_unlock = true;
	}
D
David Teigland 已提交
630

631
	error = gfs2_open_common(inode, file);
D
David Teigland 已提交
632

633
	if (need_unlock)
D
David Teigland 已提交
634 635 636 637 638 639
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
640
 * gfs2_release - called to close a struct file
D
David Teigland 已提交
641 642 643 644 645 646
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

647
static int gfs2_release(struct inode *inode, struct file *file)
D
David Teigland 已提交
648
{
649
	struct gfs2_inode *ip = GFS2_I(inode);
D
David Teigland 已提交
650

B
Bob Peterson 已提交
651
	kfree(file->private_data);
652
	file->private_data = NULL;
D
David Teigland 已提交
653

654 655
	if (!(file->f_mode & FMODE_WRITE))
		return 0;
656

657
	gfs2_rsqa_delete(ip, &inode->i_writecount);
D
David Teigland 已提交
658 659 660 661 662
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
663 664 665
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
S
Steven Whitehouse 已提交
666
 * @datasync: set if we can ignore timestamp changes
D
David Teigland 已提交
667
 *
668 669 670 671 672 673 674 675 676 677
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
678
 *
D
David Teigland 已提交
679 680 681
 * Returns: errno
 */

682 683
static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
D
David Teigland 已提交
684
{
685 686
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
687
	int sync_state = inode->i_state & I_DIRTY_ALL;
S
Steven Whitehouse 已提交
688
	struct gfs2_inode *ip = GFS2_I(inode);
689
	int ret = 0, ret1 = 0;
D
David Teigland 已提交
690

691 692 693 694 695
	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}
696

697 698
	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
S
Steven Whitehouse 已提交
699
	if (datasync)
700
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
D
David Teigland 已提交
701

S
Steven Whitehouse 已提交
702 703
	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
704
		if (ret)
S
Steven Whitehouse 已提交
705
			return ret;
706
		if (gfs2_is_jdata(ip))
707 708 709
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
710
		gfs2_ail_flush(ip->i_gl, 1);
711 712
	}

713
	if (mapping->nrpages)
714
		ret = file_fdatawait_range(file, start, end);
715 716

	return ret ? ret : ret1;
D
David Teigland 已提交
717 718
}

719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	struct gfs2_holder gh;
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	ssize_t ret;

	/*
	 * Deferred lock, even if its a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);

out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	return generic_file_read_iter(iocb, to);
}

792
/**
A
Al Viro 已提交
793
 * gfs2_file_write_iter - Perform a write to a file
794
 * @iocb: The io context
795
 * @from: The data to write
796 797 798 799 800 801 802 803
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

A
Al Viro 已提交
804
static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
805 806
{
	struct file *file = iocb->ki_filp;
807 808
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
809
	ssize_t written = 0, ret;
810

811
	ret = gfs2_rsqa_alloc(ip);
812 813
	if (ret)
		return ret;
814

A
Al Viro 已提交
815
	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
816

817
	if (iocb->ki_flags & IOCB_APPEND) {
818 819 820 821 822 823 824 825
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	ret = file_remove_privs(file);
	if (ret)
		goto out2;

	ret = file_update_time(file);
	if (ret)
		goto out2;

842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		loff_t pos, endbyte;
		ssize_t buffered;

		written = gfs2_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out2;

		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (unlikely(ret < 0))
			goto out2;
		buffered = ret;

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		pos = iocb->ki_pos;
		endbyte = pos + buffered - 1;
		ret = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (!ret) {
			iocb->ki_pos += buffered;
			written += buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (likely(ret > 0))
			iocb->ki_pos += ret;
	}
881 882 883 884 885 886 887 888 889

out2:
	current->backing_dev_info = NULL;
out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
890
	return written ? written : ret;
891 892
}

893 894 895
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
896
	struct super_block *sb = inode->i_sb;
897
	struct gfs2_inode *ip = GFS2_I(inode);
898
	loff_t end = offset + len;
899 900 901 902 903
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
904
		return error;
905

906
	gfs2_trans_add_meta(ip->i_gl, dibh);
907 908 909 910 911 912 913

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

914
	while (offset < end) {
915 916
		struct iomap iomap = { };

917 918
		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
919
		if (error)
920
			goto out;
921
		offset = iomap.offset + iomap.length;
922
		if (!(iomap.flags & IOMAP_F_NEW))
923
			continue;
924 925 926 927 928
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
929
			goto out;
930
		}
931 932
	}
out:
933
	brelse(dibh);
934 935
	return error;
}
936

937 938 939 940 941 942 943 944 945 946 947 948 949 950
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
951
{
952
	loff_t max = *len;
953 954 955 956 957 958 959
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
960

961 962 963 964 965 966 967 968 969
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

970
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
971
{
A
Al Viro 已提交
972
	struct inode *inode = file_inode(file);
973 974
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
975
	struct gfs2_alloc_parms ap = { .aflags = 0, };
976
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
977
	loff_t bytes, max_bytes, max_blks;
978
	int error;
979 980
	const loff_t pos = offset;
	const loff_t count = len;
981
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
982
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
983
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
984

985 986
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

987
	offset &= bsize_mask;
988 989 990 991 992

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
993 994 995
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;
996

997
	gfs2_size_hint(file, offset, len);
B
Bob Peterson 已提交
998

999 1000 1001
	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

1002 1003 1004
	while (len > 0) {
		if (len < bytes)
			bytes = len;
1005 1006 1007 1008 1009
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
1021
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
1022
		ap.target = data_blocks + ind_blocks;
1023 1024

		error = gfs2_quota_lock_check(ip, &ap);
1025
		if (error)
1026
			return error;
1027 1028
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
1029 1030
		max_blks = UINT_MAX;
		if (ap.allowed)
1031
			max_blks = ap.allowed;
1032

1033
		error = gfs2_inplace_reserve(ip, &ap);
1034
		if (error)
1035
			goto out_qunlock;
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);
1046 1047

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
1048
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1049 1050 1051 1052
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
1053
					 PAGE_SIZE >> inode->i_blkbits);
1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}
1068

1069
	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
1070
		i_size_write(inode, pos + count);
1071 1072
	file_update_time(file);
	mark_inode_dirty(inode);
1073

1074 1075 1076 1077
	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;
1078 1079 1080 1081 1082

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
1083 1084 1085 1086 1087 1088
	return error;
}

/**
 * gfs2_fallocate - preallocate or punch a hole in a file's blocks
 * @file: the file to operate on
 * @mode: FALLOC_FL_* flags; only PUNCH_HOLE and KEEP_SIZE are supported
 * @offset: byte offset at which to start
 * @len: number of bytes to allocate or deallocate
 *
 * Takes the inode lock and an exclusive glock, then dispatches to
 * __gfs2_punch_hole() or __gfs2_fallocate().  A failed allocation drops
 * the inode's block reservation tree.
 *
 * Returns: 0 on success or a negative errno
 */
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Growing the file: make sure the new size is permitted first */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = gfs2_rsqa_alloc(ip);
		if (ret)
			goto out_putw;

		ret = __gfs2_fallocate(file, mode, offset, len);

		/* On failure, discard any partial block reservation */
		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

/**
 * gfs2_file_splice_write - splice data from a pipe into a file
 * @pipe: the pipe to read from
 * @out: the file to write to
 * @ppos: current file position, updated on success
 * @len: number of bytes to splice
 * @flags: SPLICE_F_* flags
 *
 * Sets up the inode's reservation/quota structures and records a size
 * hint for block allocation before delegating to the generic
 * iter_file_splice_write().
 *
 * Returns: number of bytes spliced or a negative errno
 */
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

D
David Teigland 已提交
1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
1170 1171
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
1172
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
D
David Teigland 已提交
1173 1174 1175

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
1176
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
D
David Teigland 已提交
1177 1178
		return -ENOLCK;

M
Marc Eshel 已提交
1179 1180 1181 1182 1183
	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
1184
	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
1185
		if (fl->fl_type == F_UNLCK)
1186
			locks_lock_file_wait(file, fl);
1187
		return -EIO;
1188
	}
D
David Teigland 已提交
1189
	if (IS_GETLK(cmd))
1190
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
D
David Teigland 已提交
1191
	else if (fl->fl_type == F_UNLCK)
1192
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
D
David Teigland 已提交
1193
	else
1194
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
D
David Teigland 已提交
1195 1196 1197 1198
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
1199
	struct gfs2_file *fp = file->private_data;
D
David Teigland 已提交
1200
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
A
Al Viro 已提交
1201
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
D
David Teigland 已提交
1202 1203
	struct gfs2_glock *gl;
	unsigned int state;
B
Bob Peterson 已提交
1204
	u16 flags;
D
David Teigland 已提交
1205
	int error = 0;
1206
	int sleeptime;
D
David Teigland 已提交
1207 1208

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1209
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
D
David Teigland 已提交
1210

1211
	mutex_lock(&fp->f_fl_mutex);
D
David Teigland 已提交
1212

1213
	if (gfs2_holder_initialized(fl_gh)) {
1214
		struct file_lock request;
D
David Teigland 已提交
1215 1216
		if (fl_gh->gh_state == state)
			goto out;
1217 1218 1219 1220
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
1221
		gfs2_glock_dq(fl_gh);
1222
		gfs2_holder_reinit(state, flags, fl_gh);
D
David Teigland 已提交
1223
	} else {
1224 1225
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
D
David Teigland 已提交
1226 1227
		if (error)
			goto out;
1228 1229
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
D
David Teigland 已提交
1230
	}
1231 1232 1233 1234 1235 1236 1237 1238
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
D
David Teigland 已提交
1239 1240 1241 1242 1243
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
1244
		error = locks_lock_file_wait(file, fl);
1245
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
D
David Teigland 已提交
1246 1247
	}

1248
out:
1249
	mutex_unlock(&fp->f_fl_mutex);
D
David Teigland 已提交
1250 1251 1252 1253 1254
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
1255
	struct gfs2_file *fp = file->private_data;
D
David Teigland 已提交
1256 1257
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

1258
	mutex_lock(&fp->f_fl_mutex);
1259
	locks_lock_file_wait(file, fl);
A
Andreas Gruenbacher 已提交
1260
	if (gfs2_holder_initialized(fl_gh)) {
1261
		gfs2_glock_dq(fl_gh);
1262 1263
		gfs2_holder_uninit(fl_gh);
	}
1264
	mutex_unlock(&fp->f_fl_mutex);
D
David Teigland 已提交
1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
1280 1281
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;
D
David Teigland 已提交
1282 1283 1284 1285

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
1286
	} else {
D
David Teigland 已提交
1287
		return do_flock(file, cmd, fl);
1288
	}
D
David Teigland 已提交
1289 1290
}

1291
const struct file_operations gfs2_file_fops = {
1292
	.llseek		= gfs2_llseek,
1293
	.read_iter	= gfs2_file_read_iter,
A
Al Viro 已提交
1294
	.write_iter	= gfs2_file_write_iter,
1295
	.iopoll		= iomap_dio_iopoll,
1296 1297 1298
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
1299
	.release	= gfs2_release,
1300 1301 1302
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
1303
	.splice_read	= generic_file_splice_read,
1304
	.splice_write	= gfs2_file_splice_write,
1305
	.setlease	= simple_nosetlease,
1306
	.fallocate	= gfs2_fallocate,
D
David Teigland 已提交
1307 1308
};

1309
const struct file_operations gfs2_dir_fops = {
A
Al Viro 已提交
1310
	.iterate_shared	= gfs2_readdir,
1311 1312
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
1313
	.release	= gfs2_release,
1314 1315 1316
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
1317
	.llseek		= default_llseek,
D
David Teigland 已提交
1318 1319
};

1320 1321
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

1322
const struct file_operations gfs2_file_fops_nolock = {
1323
	.llseek		= gfs2_llseek,
1324
	.read_iter	= gfs2_file_read_iter,
A
Al Viro 已提交
1325
	.write_iter	= gfs2_file_write_iter,
1326
	.iopoll		= iomap_dio_iopoll,
1327 1328 1329
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
1330
	.release	= gfs2_release,
1331
	.fsync		= gfs2_fsync,
1332
	.splice_read	= generic_file_splice_read,
1333
	.splice_write	= gfs2_file_splice_write,
1334
	.setlease	= generic_setlease,
1335
	.fallocate	= gfs2_fallocate,
1336 1337
};

1338
const struct file_operations gfs2_dir_fops_nolock = {
A
Al Viro 已提交
1339
	.iterate_shared	= gfs2_readdir,
1340 1341
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
1342
	.release	= gfs2_release,
1343
	.fsync		= gfs2_fsync,
1344
	.llseek		= default_llseek,
1345 1346
};