/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
M
Miklos Szeredi 已提交
18
#include <linux/mount.h>
19
#include <linux/fs.h>
20
#include <linux/gfs2_ondisk.h>
21 22
#include <linux/falloc.h>
#include <linux/swap.h>
23
#include <linux/crc32.h>
24
#include <linux/writeback.h>
25
#include <linux/uaccess.h>
26 27
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
28
#include <linux/delay.h>
29
#include <linux/backing-dev.h>
D
David Teigland 已提交
30 31

#include "gfs2.h"
32
#include "incore.h"
D
David Teigland 已提交
33
#include "bmap.h"
34
#include "aops.h"
D
David Teigland 已提交
35 36 37 38 39 40 41 42 43
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
44
#include "util.h"
D
David Teigland 已提交
45 46 47 48 49

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
50
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
D
David Teigland 已提交
51 52 53 54 55 56 57
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

58
static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
D
David Teigland 已提交
59
{
60
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
D
David Teigland 已提交
61 62 63
	struct gfs2_holder i_gh;
	loff_t error;

64
	switch (whence) {
65
	case SEEK_END:
D
David Teigland 已提交
66 67 68
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
69
			error = generic_file_llseek(file, offset, whence);
D
David Teigland 已提交
70 71
			gfs2_glock_dq_uninit(&i_gh);
		}
72
		break;
73 74 75 76 77 78 79 80 81

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

82 83
	case SEEK_CUR:
	case SEEK_SET:
84 85 86 87
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
88
		error = generic_file_llseek(file, offset, whence);
89 90 91 92
		break;
	default:
		error = -EINVAL;
	}
D
David Teigland 已提交
93 94 95 96 97

	return error;
}

/**
A
Al Viro 已提交
98
 * gfs2_readdir - Iterator for a directory
D
David Teigland 已提交
99
 * @file: The directory to read from
A
Al Viro 已提交
100
 * @ctx: What to feed directory entries to
D
David Teigland 已提交
101 102 103 104
 *
 * Returns: errno
 */

A
Al Viro 已提交
105
static int gfs2_readdir(struct file *file, struct dir_context *ctx)
D
David Teigland 已提交
106
{
107
	struct inode *dir = file->f_mapping->host;
108
	struct gfs2_inode *dip = GFS2_I(dir);
D
David Teigland 已提交
109 110 111
	struct gfs2_holder d_gh;
	int error;

A
Al Viro 已提交
112 113
	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
D
David Teigland 已提交
114 115
		return error;

A
Al Viro 已提交
116
	error = gfs2_dir_read(dir, ctx, &file->f_ra);
D
David Teigland 已提交
117 118 119 120 121 122

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

123
/**
124
 * fsflag_gfs2flag
125
 *
126 127
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
128
 */
129 130 131 132 133 134 135 136 137 138 139
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
140
};
141

142
/**
 * gfs2_get_flags - report the inode's flags as generic FS_*_FL bits
 * @filp: file pointer
 * @ptr: user-space destination for the FS_*_FL bit mask
 *
 * Returns: errno
 */
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int i, error;
	u32 gfsflags, fsflags = 0;

	/* A shared glock keeps i_diskflags current across the cluster */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	gfsflags = ip->i_diskflags;
	/* JDATA is reported for files, INHERIT_JDATA for directories */
	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

173 174 175 176 177
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

S
Steven Whitehouse 已提交
178 179
	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
180
		flags |= S_NOSEC;
181
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
182
		flags |= S_IMMUTABLE;
183
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
184
		flags |= S_APPEND;
185
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
186
		flags |= S_NOATIME;
187
	if (ip->i_diskflags & GFS2_DIF_SYNC)
188 189 190 191
		flags |= S_SYNC;
	inode->i_flags = flags;
}

192 193 194 195 196 197
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
198
			     GFS2_DIF_TOPDIR|			\
199 200 201
			     GFS2_DIF_INHERIT_JDATA)

/**
202 203 204
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
205 206 207
 * @mask: Indicates which flags are valid
 *
 */
208
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
209
{
A
Al Viro 已提交
210
	struct inode *inode = file_inode(filp);
211 212
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
213 214 215
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
216
	u32 new_flags, flags;
217

218
	error = mnt_want_write_file(filp);
219
	if (error)
220 221
		return error;

M
Miklos Szeredi 已提交
222 223 224 225
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

226
	error = -EACCES;
227
	if (!inode_owner_or_capable(inode))
228 229 230
		goto out;

	error = 0;
231
	flags = ip->i_diskflags;
232
	new_flags = (flags & ~mask) | (reqflags & mask);
233 234 235 236 237 238 239 240
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
241
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
242
	    !capable(CAP_LINUX_IMMUTABLE))
243
		goto out;
244
	if (!IS_IMMUTABLE(inode)) {
245
		error = gfs2_permission(inode, MAY_WRITE);
246 247 248
		if (error)
			goto out;
	}
249
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
250
		if (new_flags & GFS2_DIF_JDATA)
251
			gfs2_log_flush(sdp, ip->i_gl,
252 253
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
254 255 256 257 258 259
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
260 261
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
262
	}
263
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
264 265
	if (error)
		goto out;
266 267 268
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
269
	inode->i_ctime = current_time(inode);
270
	gfs2_trans_add_meta(ip->i_gl, bh);
271
	ip->i_diskflags = new_flags;
272
	gfs2_dinode_out(ip, bh->b_data);
273
	brelse(bh);
274
	gfs2_set_inode_flags(inode);
275
	gfs2_set_aops(inode);
276 277
out_trans_end:
	gfs2_trans_end(sdp);
278 279
out:
	gfs2_glock_dq_uninit(&gh);
M
Miklos Szeredi 已提交
280
out_drop_write:
A
Al Viro 已提交
281
	mnt_drop_write_file(filp);
282 283 284
	return error;
}

285
/**
 * gfs2_set_flags - translate FS_*_FL flags from user space and apply them
 * @filp: file pointer
 * @ptr: user-space FS_*_FL bit mask to apply
 *
 * Returns: errno
 */
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	/* Convert each known FS_*_FL bit; any leftover bit is unsupported */
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask);
}

317
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
318 319
{
	switch(cmd) {
320
	case FS_IOC_GETFLAGS:
321
		return gfs2_get_flags(filp, (u32 __user *)arg);
322
	case FS_IOC_SETFLAGS:
323
		return gfs2_set_flags(filp, (u32 __user *)arg);
S
Steven Whitehouse 已提交
324 325
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
326 327 328 329
	}
	return -ENOTTY;
}

330 331
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	/* Only ever grow the hint; it is advisory, so racy updates are fine */
	if (hint > atomic_read(&ip->i_res.rs_sizehint))
		atomic_set(&ip->i_res.rs_sizehint, hint);
}

354 355 356 357 358 359 360 361 362 363 364 365 366 367
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, thats ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
368 369
	unsigned long size = PAGE_SIZE;
	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
370 371 372 373

	do {
		bh.b_state = 0;
		bh.b_size = size;
374
		gfs2_block_map(inode, lblock, &bh, 1);
375 376 377 378 379 380 381 382 383 384 385
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
386
 * @vmf: The virtual memory fault containing the page to become writable
387 388 389 390 391
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

392
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
393
{
394
	struct page *page = vmf->page;
395
	struct inode *inode = file_inode(vmf->vma->vm_file);
396 397
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
398
	struct gfs2_alloc_parms ap = { .aflags = 0, };
399
	unsigned long last_index;
400
	u64 pos = page->index << PAGE_SHIFT;
401 402
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
S
Steven Whitehouse 已提交
403
	loff_t size;
404 405
	int ret;

406
	sb_start_pagefault(inode->i_sb);
S
Steven Whitehouse 已提交
407

408
	ret = gfs2_rsqa_alloc(ip);
409
	if (ret)
410
		goto out;
411

412
	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);
B
Bob Peterson 已提交
413

414 415
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
416
	if (ret)
417
		goto out_uninit;
418

419
	/* Update file times before taking page lock */
420
	file_update_time(vmf->vma->vm_file);
421

422 423 424
	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

425
	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
S
Steven Whitehouse 已提交
426 427 428 429 430
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
431
		goto out_unlock;
S
Steven Whitehouse 已提交
432 433
	}

434 435
	ret = gfs2_rindex_update(sdp);
	if (ret)
436 437
		goto out_unlock;

438
	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
439
	ap.target = data_blocks + ind_blocks;
440 441 442
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
443
	ret = gfs2_inplace_reserve(ip, &ap);
444 445 446 447 448 449
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
450
	if (ind_blocks || data_blocks) {
451
		rblocks += RES_STATFS + RES_QUOTA;
452
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
453
	}
454 455 456 457 458 459
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
S
Steven Whitehouse 已提交
460
	size = i_size_read(inode);
461
	last_index = (size - 1) >> PAGE_SHIFT;
S
Steven Whitehouse 已提交
462 463 464 465 466 467 468 469 470 471 472 473
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
474
	ret = 0;
S
Steven Whitehouse 已提交
475
	if (gfs2_is_stuffed(ip))
476
		ret = gfs2_unstuff_dinode(ip, page);
S
Steven Whitehouse 已提交
477 478
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);
479

S
Steven Whitehouse 已提交
480 481 482
out_trans_end:
	if (ret)
		unlock_page(page);
483 484 485 486 487 488 489
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
490
out_uninit:
491
	gfs2_holder_uninit(&gh);
S
Steven Whitehouse 已提交
492 493
	if (ret == 0) {
		set_page_dirty(page);
494
		wait_for_stable_page(page);
S
Steven Whitehouse 已提交
495
	}
496
out:
497
	sb_end_pagefault(inode->i_sb);
S
Steven Whitehouse 已提交
498
	return block_page_mkwrite_return(ret);
499 500
}

501
static const struct vm_operations_struct gfs2_vm_ops = {
502
	.fault = filemap_fault,
503
	.map_pages = filemap_map_pages,
504 505 506
	.page_mkwrite = gfs2_page_mkwrite,
};

D
David Teigland 已提交
507 508 509 510 511
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
512 513 514 515 516
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
D
David Teigland 已提交
517 518 519 520
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
521
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
D
David Teigland 已提交
522

523 524
	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
525 526
		struct gfs2_holder i_gh;
		int error;
D
David Teigland 已提交
527

528 529
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
530 531
		if (error)
			return error;
532 533 534
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
535
	}
536
	vma->vm_ops = &gfs2_vm_ops;
D
David Teigland 已提交
537

538
	return 0;
D
David Teigland 已提交
539 540 541
}

/**
542 543 544
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
D
David Teigland 已提交
545
 *
546 547 548 549 550 551
 * This maybe called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
D
David Teigland 已提交
552 553
 */

554
int gfs2_open_common(struct inode *inode, struct file *file)
D
David Teigland 已提交
555 556
{
	struct gfs2_file *fp;
557 558 559 560 561 562 563
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}
D
David Teigland 已提交
564

565
	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
D
David Teigland 已提交
566 567 568
	if (!fp)
		return -ENOMEM;

569
	mutex_init(&fp->f_fl_mutex);
D
David Teigland 已提交
570

571
	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
572
	file->private_data = fp;
573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;
D
David Teigland 已提交
596

597
	if (S_ISREG(ip->i_inode.i_mode)) {
D
David Teigland 已提交
598 599 600
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
601 602 603
			return error;
		need_unlock = true;
	}
D
David Teigland 已提交
604

605
	error = gfs2_open_common(inode, file);
D
David Teigland 已提交
606

607
	if (need_unlock)
D
David Teigland 已提交
608 609 610 611 612 613
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
614
 * gfs2_release - called to close a struct file
D
David Teigland 已提交
615 616 617 618 619 620
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

621
static int gfs2_release(struct inode *inode, struct file *file)
D
David Teigland 已提交
622
{
623
	struct gfs2_inode *ip = GFS2_I(inode);
D
David Teigland 已提交
624

B
Bob Peterson 已提交
625
	kfree(file->private_data);
626
	file->private_data = NULL;
D
David Teigland 已提交
627

628 629
	if (!(file->f_mode & FMODE_WRITE))
		return 0;
630

631
	gfs2_rsqa_delete(ip, &inode->i_writecount);
D
David Teigland 已提交
632 633 634 635 636
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
637 638 639
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
S
Steven Whitehouse 已提交
640
 * @datasync: set if we can ignore timestamp changes
D
David Teigland 已提交
641
 *
642 643 644 645 646 647 648 649 650 651
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
652
 *
D
David Teigland 已提交
653 654 655
 * Returns: errno
 */

656 657
static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
D
David Teigland 已提交
658
{
659 660
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
661
	int sync_state = inode->i_state & I_DIRTY_ALL;
S
Steven Whitehouse 已提交
662
	struct gfs2_inode *ip = GFS2_I(inode);
663
	int ret = 0, ret1 = 0;
D
David Teigland 已提交
664

665 666 667 668 669
	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}
670

671 672
	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
S
Steven Whitehouse 已提交
673
	if (datasync)
674
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
D
David Teigland 已提交
675

S
Steven Whitehouse 已提交
676 677
	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
678
		if (ret)
S
Steven Whitehouse 已提交
679
			return ret;
680
		if (gfs2_is_jdata(ip))
681 682 683
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
684
		gfs2_ail_flush(ip->i_gl, 1);
685 686
	}

687
	if (mapping->nrpages)
688
		ret = file_fdatawait_range(file, start, end);
689 690

	return ret ? ret : ret1;
D
David Teigland 已提交
691 692
}

693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765
/**
 * gfs2_file_direct_read - O_DIRECT read path
 * @iocb: The io context
 * @to: Destination for the data read
 *
 * Takes a deferred glock so that buffered caches on other nodes are
 * flushed before the direct I/O is issued.
 *
 * Returns: bytes read or errno
 */
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	struct gfs2_holder gh;
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

/**
 * gfs2_file_direct_write - O_DIRECT write path
 * @iocb: The io context
 * @from: The data to write
 *
 * Returns: bytes written or errno; falls through without writing when the
 *          write extends beyond EOF so the caller can use buffered I/O
 */
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	ssize_t ret;

	/*
	 * Deferred lock, even if its a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);

out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

/**
 * gfs2_file_read_iter - read from a file
 * @iocb: The io context
 * @to: Destination for the data read
 *
 * Tries the direct path for IOCB_DIRECT and falls back to buffered
 * reads when the direct path declines with -ENOTBLK.
 *
 * Returns: bytes read or errno
 */
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	return generic_file_read_iter(iocb, to);
}

766
/**
A
Al Viro 已提交
767
 * gfs2_file_write_iter - Perform a write to a file
768
 * @iocb: The io context
769
 * @from: The data to write
770 771 772 773 774 775 776 777
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

A
Al Viro 已提交
778
static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
779 780
{
	struct file *file = iocb->ki_filp;
781 782
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
783
	ssize_t written = 0, ret;
784

785
	ret = gfs2_rsqa_alloc(ip);
786 787
	if (ret)
		return ret;
788

A
Al Viro 已提交
789
	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
790

791
	if (iocb->ki_flags & IOCB_APPEND) {
792 793 794 795 796 797 798 799
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815
	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	ret = file_remove_privs(file);
	if (ret)
		goto out2;

	ret = file_update_time(file);
	if (ret)
		goto out2;

816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		loff_t pos, endbyte;
		ssize_t buffered;

		written = gfs2_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out2;

		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (unlikely(ret < 0))
			goto out2;
		buffered = ret;

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		pos = iocb->ki_pos;
		endbyte = pos + buffered - 1;
		ret = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (!ret) {
			iocb->ki_pos += buffered;
			written += buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		if (likely(ret > 0))
			iocb->ki_pos += ret;
	}
855 856 857 858 859 860 861 862 863

out2:
	current->backing_dev_info = NULL;
out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
864
	return written ? written : ret;
865 866
}

867 868 869
/**
 * fallocate_chunk - allocate and zero one chunk of a fallocate request
 * @inode: The inode being preallocated
 * @offset: Byte offset of the chunk
 * @len: Length of the chunk in bytes
 * @mode: fallocate mode flags (currently unused here)
 *
 * Must be called inside a transaction. Newly allocated extents are
 * zeroed out on disk so stale data is never exposed.
 *
 * Returns: errno
 */
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		/* Only freshly allocated extents need zeroing */
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}
910 911 912 913 914 915 916 917 918 919 920 921 922 923
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	/* Peel off the indirect blocks needed at each tree height */
	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		/* Capped by the caller's limit; redo the forward calculation */
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

943
/**
 * __gfs2_fallocate - do the work of preallocating blocks for fallocate
 * @file: The file being preallocated
 * @mode: fallocate mode flags
 * @offset: Starting byte offset
 * @len: Number of bytes
 *
 * Works through the range chunk by chunk, bounded on each iteration by
 * what quota and the chosen resource group will allow.
 *
 * Returns: 0 or errno
 */
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* Round the range out to whole filesystem blocks */
	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			/* Already allocated; skip ahead */
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

/**
 * gfs2_fallocate - preallocate blocks or punch a hole in a file
 * @file: the file being operated on
 * @mode: FALLOC_FL_* flags (only PUNCH_HOLE and KEEP_SIZE are supported)
 * @offset: starting byte offset
 * @len: number of bytes to allocate / deallocate
 *
 * Takes the inode lock and an exclusive glock for the whole operation,
 * then dispatches to __gfs2_punch_hole() or __gfs2_fallocate().
 *
 * Returns: 0 on success or a negative errno
 */
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	/* Reject any mode bits other than the two this code implements. */
	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	/* Exclusive glock: the allocation changes on-disk metadata. */
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* If the file may grow, validate the new size (rlimit/s_maxbytes). */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		/* Set up block/quota reservations before allocating. */
		ret = gfs2_rsqa_alloc(ip);
		if (ret)
			goto out_putw;

		ret = __gfs2_fallocate(file, mode, offset, len);

		/* On failure, drop any partial block reservation. */
		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

1115 1116 1117 1118 1119 1120 1121
/**
 * gfs2_file_splice_write - splice data from a pipe into a GFS2 file
 * @pipe: the pipe to read from
 * @out: the destination file
 * @ppos: current file position (updated on success)
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Sets up the inode's reservation/quota structures and records a size
 * hint for the allocator before delegating to the generic iterator
 * based splice implementation.
 *
 * Returns: number of bytes spliced or a negative errno
 */
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
	int ret = gfs2_rsqa_alloc(ip);

	if (ret != 0)
		return (ssize_t)ret;

	gfs2_size_hint(out, *ppos, len);
	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

1131 1132
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

D
David Teigland 已提交
1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
1144 1145
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
1146
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
D
David Teigland 已提交
1147 1148 1149

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
1150
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
D
David Teigland 已提交
1151 1152
		return -ENOLCK;

M
Marc Eshel 已提交
1153 1154 1155 1156 1157
	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
1158 1159
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
1160
			locks_lock_file_wait(file, fl);
1161
		return -EIO;
1162
	}
D
David Teigland 已提交
1163
	if (IS_GETLK(cmd))
1164
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
D
David Teigland 已提交
1165
	else if (fl->fl_type == F_UNLCK)
1166
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
D
David Teigland 已提交
1167
	else
1168
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
D
David Teigland 已提交
1169 1170 1171 1172
}

/*
 * do_flock - take (or upgrade/downgrade) a flock-style lock
 *
 * An flock lock is represented by a per-open-file glock holder
 * (fp->f_fl_gh). If a holder already exists in the wanted state we are
 * done; otherwise the old VFS lock is dropped and the holder is
 * re-queued in the new state. Non-blocking requests use LM_FLAG_TRY_1CB
 * and a short exponential-backoff retry loop.
 */
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	/* Map flock semantics onto glock states/flags. */
	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	/* f_fl_mutex serializes flock operations on this open file. */
	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		/* Already hold the lock in the requested state: nothing to do. */
		if (fl_gh->gh_state == state)
			goto out;
		/* State change: drop the VFS-level lock before re-queueing. */
		locks_lock_file_wait(file,
				     &(struct file_lock) {
					     .fl_type = F_UNLCK,
					     .fl_flags = FL_FLOCK
				     });
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		/* First flock on this file: look up/create the flock glock. */
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		/* The holder keeps its own reference; drop ours. */
		gfs2_glock_put(gl);
	}
	/* Retry TRY-failures with increasing sleeps (1, 2, 4 ms). */
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		/* Translate the internal try-failed code for userspace. */
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		/* Glock granted; record the lock with the VFS. */
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

/*
 * do_unflock - release an flock-style lock
 *
 * Drops the VFS-level lock first, then dequeues and tears down the
 * glock holder if one was acquired by do_flock().
 */
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
1254 1255
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;
D
David Teigland 已提交
1256 1257 1258 1259

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
1260
	} else {
D
David Teigland 已提交
1261
		return do_flock(file, cmd, fl);
1262
	}
D
David Teigland 已提交
1263 1264
}

1265
/* Regular-file operations used when DLM locking is configured
 * (CONFIG_GFS2_FS_LOCKING_DLM): includes cluster-aware ->lock/->flock. */
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

1282
/* Directory operations used when DLM locking is configured. */
const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

1293 1294
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

1295
/* Regular-file operations for the no-DLM build/configuration:
 * same as gfs2_file_fops but without ->lock/->flock, and with the
 * generic lease implementation instead of simple_nosetlease. */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

1310
/* Directory operations for the no-DLM configuration (no ->lock/->flock). */
const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};