/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

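/**
 * gfs2_ail_error - report an AIL buffer in an unexpected state
 * @gl: the glock the buffer is attached to
 * @bh: the offending buffer head
 *
 * Buffers on the AIL list should be clean by the time we try to revoke
 * them, so log the buffer and glock details and withdraw the filesystem.
 */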
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
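	/*
	 * Queue a revoke for each buffer on the glock's AIL list.  In the
	 * fsync case some buffers may legitimately still be in flight and
	 * are simply skipped; otherwise a dirty, pinned or locked buffer
	 * here is an error.
	 */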
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
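	/* One block for the log header, plus the blocks needed for the revokes */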
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

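	/*
	 * The first journal block holding revokes is a log descriptor; any
	 * continuation blocks carry a meta header instead.  Grow max_revokes
	 * a block at a time until it covers everything currently on the AIL.
	 */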
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

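	/* The rgrp is stable on disk now, so its cached clone bitmaps can go */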
	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

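	/* Only regular files have a data mapping that needs writing back */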
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
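	/* Keep whichever atime is newer, the in-core one or the on-disk one */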
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode glock is locked by a process
 * @gh: the holder of the glock being locked
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

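	/*
	 * A truncate that was interrupted is flagged in the dinode; queue the
	 * inode on the superblock's truncation list so it can be finished off.
	 */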
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok - Check whether the transaction glock can be demoted
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

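/* Lookup table from lock type number to the matching glock operations */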
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};