/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

/**
 * gfs2_ail_error - report an AIL buffer found in an unexpected state
 * @gl: the glock whose AIL list the buffer was found on
 * @bh: the offending buffer head
 *
 * Logs the buffer's block number, state bits and mapping, plus the owning
 * glock's identity, then withdraws the filesystem via gfs2_lm_withdraw() -
 * a dirty/pinned/locked buffer still on the AIL here indicates journal
 * accounting corruption that cannot be recovered from in place.
 */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add to the transaction
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* Any of these state bits means the buffer is still in use */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	/* Log lock, then AIL lock - this ordering must match the rest of the
	   log code */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		/* Stop once the caller's revoke budget is exhausted */
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			/* During fsync, in-flight buffers are expected;
			   skip them.  Otherwise this is a hard error. */
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* Outside fsync the AIL must now be completely empty */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
93
	sb_start_intwrite(sdp->sd_vfs);
S
Steven Whitehouse 已提交
94
	gfs2_log_reserve(sdp, tr.tr_reserved);
95
	WARN_ON_ONCE(current->journal_info);
S
Steven Whitehouse 已提交
96 97
	current->journal_info = &tr;

98
	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
S
Steven Whitehouse 已提交
99 100 101 102

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
103

/**
 * gfs2_ail_flush - flush a glock's AIL list inside a new transaction
 * @gl: the glock
 * @fsync: true when called from the fsync path (busy buffers are tolerated)
 *
 * Like gfs2_ail_empty_gl(), but starts a real transaction with
 * gfs2_trans_begin() and computes the revoke budget from the on-disk
 * block size rather than from an exact count.
 */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	/* Revokes that fit in one log descriptor block */
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* Grow the budget one continuation block at a time until it covers
	   everything currently on the AIL */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
S
Steven Whitehouse 已提交
126
 * rgrp_go_sync - sync out the metadata for this glock
D
David Teigland 已提交
127 128 129 130 131 132 133
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * not return to caller to demote/unlock the glock until I/O is complete.
 */

S
Steven Whitehouse 已提交
134
static void rgrp_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
135
{
136
	struct address_space *metamapping = gfs2_glock2aspace(gl);
137
	struct gfs2_rgrpd *rgd;
S
Steven Whitehouse 已提交
138 139 140
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
S
Steven Whitehouse 已提交
141
		return;
142
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
S
Steven Whitehouse 已提交
143

S
Steven Whitehouse 已提交
144 145 146 147 148
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
149

150 151 152 153 154
	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
D
David Teigland 已提交
155 156 157
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	/* The AIL must already be empty before cached pages are dropped */
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		/* Force a re-read of the rgrp header next time it is used */
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	/* Only regular files have page-cache data to deal with */
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		/* Tear down writable mmap mappings so future faults re-take
		   the lock, and wait for in-flight direct I/O */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/* Log flush, then writeback of metadata and (for regular files)
	   data, then wait for both mappings before emptying the AIL */
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags; DIO_METADATA means metadata must be invalidated too
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	/* Sync must already have emptied the AIL */
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Force gfs2_inode_refresh() on next lock */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* The resource index is cached separately; mark it stale and flush
	   the log when its inode is invalidated */
	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

266
static int inode_go_demote_ok(const struct gfs2_glock *gl)
D
David Teigland 已提交
267 268
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
269 270
	struct gfs2_holder *gh;

271 272
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
273 274 275 276 277 278 279

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

280
	return 1;
D
David Teigland 已提交
281 282
}

283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
M
Miklos Szeredi 已提交
305
			set_nlink(inode, nlink);
306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327
	}
}

/**
 * gfs2_dinode_in - Copy an on-disk dinode into the in-core inode
 * @ip: The GFS2 inode to populate
 * @buf: The raw on-disk dinode (big-endian struct gfs2_dinode)
 *
 * Validates the block address, metadata height and directory depth while
 * translating fields from disk endianness.
 *
 * Returns: 0 on success, -EIO (after gfs2_consist_inode()) on corruption
 */
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	default:
		break;
	}	/* no stray ';' - the switch brace needs no terminator */

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	/* Only move atime forwards; another node may hold a newer value */
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}


/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	/* NOTE(review): GIF_INVALID is cleared even when gfs2_dinode_in()
	   returned -EIO; presumably the consistency check/withdraw path
	   makes this safe - confirm against callers. */
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 *
 * Refreshes the in-core dinode if it was invalidated, and hands inodes
 * with an interrupted truncate over to the quota daemon to finish.
 *
 * Returns: errno, or 1 when truncate completion has been queued
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	/* Re-read the dinode if inode_go_inval() marked it stale */
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	/* A truncate was interrupted (e.g. by a crash); queue the inode so
	   the quota daemon can complete it */
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		/* NOTE(review): list_add(new, head) usually takes the new
		   entry first; here the sd_trunc_list head is passed as
		   "new". Looks intentional/long-standing, but verify the
		   argument order against the sd_trunc_list consumer. */
		if (list_empty(&ip->i_trunc_list))
			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose attached inode (if any) is dumped
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	/* Nothing attached to this glock - nothing to print */
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 * Flushes all metadata and shuts the log down when the transaction
 * glock is being demoted away from a live journal.
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder (unused here)
 *
 * Re-reads the journal head after the lock transition and re-initializes
 * the log pointers, checking that the journal was cleanly unmounted.
 *
 * Returns: 0
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop cached journal metadata before re-reading the head */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		/* The journal must have been cleanly unmounted */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok - the transaction glock is never demoted voluntarily
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this demote request came from another node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Only remote demote requests on a writable mount trigger deletion */
	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* Take a reference for the queued work; drop it again if the
		   work item was already pending (queue_work returned 0) */
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

/* Glock operation vectors, one per lock type.  Types with no entry for a
 * given operation fall back to the glock core's default behaviour. */

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

/* Lookup table indexed by LM_TYPE_* for mapping a lock type to its ops */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};