glops.c 16.0 KB
Newer Older
D
David Teigland 已提交
1 2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
D
David Teigland 已提交
4 5 6
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
7
 * of the GNU General Public License version 2.
D
David Teigland 已提交
8 9 10 11 12
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
13
#include <linux/gfs2_ondisk.h>
14
#include <linux/bio.h>
15
#include <linux/posix_acl.h>
D
David Teigland 已提交
16 17

#include "gfs2.h"
18
#include "incore.h"
D
David Teigland 已提交
19 20 21 22 23 24 25 26
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
27
#include "util.h"
28
#include "trans.h"
29
#include "dir.h"
D
David Teigland 已提交
30

31 32 33 34 35 36 37 38 39 40 41
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

42
/**
S
Steven Whitehouse 已提交
43
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
44
 * @gl: the glock
45
 * @fsync: set when called from fsync (not all buffers will be clean)
46 47 48 49
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

50 51
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
52 53 54
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
55
	struct gfs2_bufdata *bd, *tmp;
56
	struct buffer_head *bh;
57
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
58

59
	gfs2_log_lock(sdp);
D
Dave Chinner 已提交
60
	spin_lock(&sdp->sd_ail_lock);
61 62 63
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
64
		bh = bd->bd_bh;
65 66 67
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
68
			gfs2_ail_error(gl, bh);
69
		}
70
		gfs2_trans_add_revoke(sdp, bd);
71
		nr_revokes--;
72
	}
73
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
D
Dave Chinner 已提交
74
	spin_unlock(&sdp->sd_ail_lock);
75
	gfs2_log_unlock(sdp);
S
Steven Whitehouse 已提交
76 77 78 79 80 81 82 83 84
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
85 86
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
S
Steven Whitehouse 已提交
87 88 89 90 91
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

92 93 94
	/* A shortened, inline version of gfs2_trans_begin()
         * tr->alloced is not set since the transaction structure is
         * on the stack */
S
Steven Whitehouse 已提交
95 96
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
97
	sb_start_intwrite(sdp->sd_vfs);
98 99 100 101
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) {
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}
102
	WARN_ON_ONCE(current->journal_info);
S
Steven Whitehouse 已提交
103 104
	current->journal_info = &tr;

105
	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
S
Steven Whitehouse 已提交
106 107

	gfs2_trans_end(sdp);
108
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
S
Steven Whitehouse 已提交
109
}
110

111
/**
 * gfs2_ail_flush - revoke the AIL buffers for a glock
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 *
 * Reserves enough log space for every revoke currently counted on the
 * glock's AIL, flushes them, then flushes the log.
 */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes;

	if (!revokes)
		return;

	/* Revokes that fit alongside a log descriptor header ... */
	max_revokes = (sdp->sd_sb.sb_bsize -
		       sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	/* ... plus whole further blocks until all revokes are covered */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) / sizeof(u64);

	if (gfs2_trans_begin(sdp, 0, max_revokes))
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
S
Steven Whitehouse 已提交
131 132

/**
S
Steven Whitehouse 已提交
133
 * rgrp_go_sync - sync out the metadata for this glock
D
David Teigland 已提交
134 135 136 137 138 139 140
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * not return to caller to demote/unlock the glock until I/O is complete.
 */

S
Steven Whitehouse 已提交
141
static void rgrp_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
142
{
143 144
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
145
	struct gfs2_rgrpd *rgd;
S
Steven Whitehouse 已提交
146 147 148
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
S
Steven Whitehouse 已提交
149
		return;
150
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
S
Steven Whitehouse 已提交
151

152
	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
153 154 155
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
S
Steven Whitehouse 已提交
156
	gfs2_ail_empty_gl(gl);
157

158 159 160 161 162
	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
D
David Teigland 已提交
163 164 165
}

/**
S
Steven Whitehouse 已提交
166
 * rgrp_go_inval - invalidate the metadata for this glock
D
David Teigland 已提交
167 168 169
 * @gl: the glock
 * @flags:
 *
S
Steven Whitehouse 已提交
170 171 172
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
D
David Teigland 已提交
173 174
 */

S
Steven Whitehouse 已提交
175
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
D
David Teigland 已提交
176
{
177 178
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
D
David Teigland 已提交
179

180
	WARN_ON_ONCE(!(flags & DIO_METADATA));
181
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
182
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
183

S
Steven Whitehouse 已提交
184 185
	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
186 187
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
D
David Teigland 已提交
188 189
}

S
Steven Whitehouse 已提交
190 191 192 193 194 195 196 197 198
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
199
	struct address_space *metamapping = gfs2_glock2aspace(gl);
200 201
	int error;

S
Steven Whitehouse 已提交
202 203
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
204 205 206 207 208
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
S
Steven Whitehouse 已提交
209 210
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
S
Steven Whitehouse 已提交
211

212
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
S
Steven Whitehouse 已提交
213

214
	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
S
Steven Whitehouse 已提交
215 216 217 218 219 220
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
S
Steven Whitehouse 已提交
221
	}
S
Steven Whitehouse 已提交
222 223 224
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
225 226 227 228
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
229
	smp_mb__before_atomic();
230
	clear_bit(GLF_DIRTY, &gl->gl_flags);
S
Steven Whitehouse 已提交
231 232
}

D
David Teigland 已提交
233 234 235 236
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags:
237 238
 *
 * Normally we invalidate everything, but if we are moving into
S
Steven Whitehouse 已提交
239 240
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
D
David Teigland 已提交
241 242 243 244 245
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
246
	struct gfs2_inode *ip = gl->gl_object;
D
David Teigland 已提交
247

S
Steven Whitehouse 已提交
248 249 250
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
251
		struct address_space *mapping = gfs2_glock2aspace(gl);
S
Steven Whitehouse 已提交
252
		truncate_inode_pages(mapping, 0);
253
		if (ip) {
254
			set_bit(GIF_INVALID, &ip->i_flags);
255
			forget_all_cached_acls(&ip->i_inode);
256
			gfs2_dir_hash_inval(ip);
257
		}
258 259
	}

260
	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
261
		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
S
Steven Whitehouse 已提交
262
		gl->gl_sbd->sd_rindex_uptodate = 0;
263
	}
264
	if (ip && S_ISREG(ip->i_inode.i_mode))
265
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
D
David Teigland 已提交
266 267 268 269 270 271 272 273 274
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

275
static int inode_go_demote_ok(const struct gfs2_glock *gl)
D
David Teigland 已提交
276 277
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
278 279
	struct gfs2_holder *gh;

280 281
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
282 283 284 285 286 287 288

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

289
	return 1;
D
David Teigland 已提交
290 291
}

292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if (inode->i_nlink == nlink || inode->i_nlink == 0)
		return;

	if (nlink == 0)
		clear_nlink(inode);
	else
		set_nlink(inode, nlink);
}

/**
 * gfs2_dinode_in - Populate the in-core inode from an on-disk dinode
 * @ip: The GFS2 inode to fill in
 * @buf: Buffer containing the on-disk struct gfs2_dinode
 *
 * Returns: 0 on success, -EIO (after gfs2_consist_inode) if the dinode
 * fails its consistency checks (address mismatch, bad height/depth).
 */

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}	/* stray ';' after this switch removed (empty statement) */

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	/* Only move atime forwards; the in-core value may be newer */
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

D
David Teigland 已提交
401 402 403 404 405 406 407 408 409 410 411
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
412
	struct gfs2_sbd *sdp = gl->gl_sbd;
413
	struct gfs2_inode *ip = gl->gl_object;
D
David Teigland 已提交
414 415
	int error = 0;

416
	if (!ip || (gh->gh_flags & GL_SKIP))
D
David Teigland 已提交
417 418
		return 0;

419
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
D
David Teigland 已提交
420 421 422 423 424
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

425 426 427
	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

428
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
D
David Teigland 已提交
429
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
430 431 432 433 434 435 436 437
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}
D
David Teigland 已提交
438 439 440 441

	return error;
}

442 443 444 445 446 447 448
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @ip: the inode
 *
 */

449
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
450 451 452
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
453
		return;
S
Steven Whitehouse 已提交
454
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
455 456
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
457 458
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
S
Steven Whitehouse 已提交
459
		  (unsigned long long)i_size_read(&ip->i_inode));
460 461
}

D
David Teigland 已提交
462
/**
463
 * freeze_go_sync - promote/demote the freeze glock
D
David Teigland 已提交
464 465 466 467 468 469
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

470
static void freeze_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
471 472
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
473
	DEFINE_WAIT(wait);
D
David Teigland 已提交
474

475
	if (gl->gl_state == LM_ST_SHARED &&
D
David Teigland 已提交
476
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
477 478 479 480 481 482 483 484 485
		atomic_set(&sdp->sd_log_freeze, 1);
		wake_up(&sdp->sd_logd_waitq);
		do {
			prepare_to_wait(&sdp->sd_log_frozen_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_freeze))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_freeze));
		finish_wait(&sdp->sd_log_frozen_wait, &wait);
D
David Teigland 已提交
486 487 488 489
	}
}

/**
490
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
D
David Teigland 已提交
491 492 493 494
 * @gl: the glock
 *
 */

495
static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
D
David Teigland 已提交
496 497
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
498
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
499
	struct gfs2_glock *j_gl = ip->i_gl;
A
Al Viro 已提交
500
	struct gfs2_log_header_host head;
D
David Teigland 已提交
501 502
	int error;

503
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
504
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
D
David Teigland 已提交
505 506 507 508 509 510 511 512 513 514 515 516 517

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
518
	return 0;
D
David Teigland 已提交
519 520
}

521 522 523 524 525 526 527
/**
 * freeze_go_demote_ok - demote check for the freeze glock
 * @gl: the glock
 *
 * Always returns 0 (never demote).  Note: the old header comment was
 * mistitled "trans_go_demote_ok".
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

533 534 535 536 537 538
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
539
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
540 541
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
542 543
	struct gfs2_sbd *sdp = gl->gl_sbd;

544
	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
545
		return;
546 547

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
548
	    gl->gl_state == LM_ST_SHARED && ip) {
S
Steven Whitehouse 已提交
549
		gl->gl_lockref.count++;
550
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
S
Steven Whitehouse 已提交
551
			gl->gl_lockref.count--;
552 553 554
	}
}

555
const struct gfs2_glock_operations gfs2_meta_glops = {
556
	.go_type = LM_TYPE_META,
D
David Teigland 已提交
557 558
};

559
const struct gfs2_glock_operations gfs2_inode_glops = {
560
	.go_sync = inode_go_sync,
D
David Teigland 已提交
561 562 563
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
564
	.go_dump = inode_go_dump,
565
	.go_type = LM_TYPE_INODE,
566
	.go_flags = GLOF_ASPACE,
D
David Teigland 已提交
567 568
};

569
const struct gfs2_glock_operations gfs2_rgrp_glops = {
570
	.go_sync = rgrp_go_sync,
S
Steven Whitehouse 已提交
571
	.go_inval = rgrp_go_inval,
572 573
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
574
	.go_dump = gfs2_rgrp_dump,
575
	.go_type = LM_TYPE_RGRP,
576
	.go_flags = GLOF_LVB,
D
David Teigland 已提交
577 578
};

579 580 581 582
const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
583
	.go_type = LM_TYPE_NONDISK,
D
David Teigland 已提交
584 585
};

586
const struct gfs2_glock_operations gfs2_iopen_glops = {
587
	.go_type = LM_TYPE_IOPEN,
588
	.go_callback = iopen_go_callback,
D
David Teigland 已提交
589 590
};

591
const struct gfs2_glock_operations gfs2_flock_glops = {
592
	.go_type = LM_TYPE_FLOCK,
D
David Teigland 已提交
593 594
};

595
const struct gfs2_glock_operations gfs2_nondisk_glops = {
596
	.go_type = LM_TYPE_NONDISK,
D
David Teigland 已提交
597 598
};

599
const struct gfs2_glock_operations gfs2_quota_glops = {
600
	.go_type = LM_TYPE_QUOTA,
601
	.go_flags = GLOF_LVB,
D
David Teigland 已提交
602 603
};

604
const struct gfs2_glock_operations gfs2_journal_glops = {
605
	.go_type = LM_TYPE_JOURNAL,
D
David Teigland 已提交
606 607
};

608 609 610 611 612 613 614 615 616 617 618
/* Dispatch table mapping LM_TYPE_* to the matching glock operations */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META]    = &gfs2_meta_glops,
	[LM_TYPE_INODE]   = &gfs2_inode_glops,
	[LM_TYPE_RGRP]    = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN]   = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK]   = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA]   = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};