glops.c 14.8 KB
Newer Older
D
David Teigland 已提交
1 2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
D
David Teigland 已提交
4 5 6
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
7
 * of the GNU General Public License version 2.
D
David Teigland 已提交
8 9 10 11 12
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
13
#include <linux/gfs2_ondisk.h>
14
#include <linux/bio.h>
15
#include <linux/posix_acl.h>
D
David Teigland 已提交
16 17

#include "gfs2.h"
18
#include "incore.h"
D
David Teigland 已提交
19 20 21 22 23 24 25 26
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
27
#include "util.h"
28
#include "trans.h"
29
#include "dir.h"
D
David Teigland 已提交
30

31
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 * The caller must already have a transaction open (see the two callers,
 * gfs2_ail_empty_gl() and gfs2_ail_flush()), since revokes are added to
 * the current transaction here.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		/* Detach the buffer from the AIL and from its bufdata */
		gfs2_remove_from_ail(bd);
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		/*
		 * sd_ail_lock is dropped before taking the log lock;
		 * NOTE(review): presumably required by lock ordering
		 * between sd_ail_lock and the log lock — confirm.
		 */
		spin_unlock(&sdp->sd_ail_lock);

		bd->bd_blkno = bh->b_blocknr;
		gfs2_log_lock(sdp);
		/* The buffer must not be dirty, locked or pinned here */
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		/* Add a revoke for this block to the current transaction */
		gfs2_trans_add_revoke(sdp, bd);
		gfs2_log_unlock(sdp);

		spin_lock(&sdp->sd_ail_lock);
	}
	/* Nothing may remain on the AIL for this glock */
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
}


/**
 * gfs2_ail_empty_gl - empty a glock's AIL list, with a hand-built transaction
 * @gl: the glock
 *
 * Opens a transaction by hand (a shortened, inline gfs2_trans_begin()),
 * reserving one revoke per buffer currently on the AIL, then flushes the
 * AIL via __gfs2_ail_flush(), ends the transaction and flushes the log.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	/* One revoke will be needed per buffer on the AIL list */
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	INIT_LIST_HEAD(&tr.tr_list_buf);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	/* No transaction may already be open on this task */
	BUG_ON(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
92

S
Steven Whitehouse 已提交
93 94 95 96 97 98 99 100 101 102 103 104 105
/**
 * gfs2_ail_flush - empty a glock's AIL list, using a normal transaction
 * @gl: the glock
 *
 * Like gfs2_ail_empty_gl(), but uses gfs2_trans_begin() with a revoke
 * count matching the number of buffers on the glock's AIL list.
 */

void gfs2_ail_flush(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);

	if (revokes == 0)
		return;

	if (gfs2_trans_begin(sdp, 0, revokes) != 0)
		return;

	__gfs2_ail_flush(gl);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
S
Steven Whitehouse 已提交
109 110

/**
S
Steven Whitehouse 已提交
111
 * rgrp_go_sync - sync out the metadata for this glock
D
David Teigland 已提交
112 113 114 115 116 117 118
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * not return to caller to demote/unlock the glock until I/O is complete.
 */

S
Steven Whitehouse 已提交
119
static void rgrp_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
120
{
121
	struct address_space *metamapping = gfs2_glock2aspace(gl);
S
Steven Whitehouse 已提交
122 123 124
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
S
Steven Whitehouse 已提交
125
		return;
S
Steven Whitehouse 已提交
126
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
S
Steven Whitehouse 已提交
127

S
Steven Whitehouse 已提交
128 129 130 131 132
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
D
David Teigland 已提交
133 134 135
}

/**
S
Steven Whitehouse 已提交
136
 * rgrp_go_inval - invalidate the metadata for this glock
D
David Teigland 已提交
137 138 139
 * @gl: the glock
 * @flags:
 *
S
Steven Whitehouse 已提交
140 141 142
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
D
David Teigland 已提交
143 144
 */

S
Steven Whitehouse 已提交
145
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
D
David Teigland 已提交
146
{
147
	struct address_space *mapping = gfs2_glock2aspace(gl);
D
David Teigland 已提交
148

S
Steven Whitehouse 已提交
149 150 151
	BUG_ON(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);
152

S
Steven Whitehouse 已提交
153 154
	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
155 156
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
D
David Teigland 已提交
157 158
}

S
Steven Whitehouse 已提交
159 160 161 162 163 164 165 166 167
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 * Writes back and waits on dirty data (regular files only) and metadata,
 * then empties the glock's AIL list.
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	/* Only regular files have data pages to deal with below */
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	/*
	 * NOTE(review): shared mappings are unmapped before the dirty
	 * test, i.e. even when GLF_DIRTY is clear — presumably so later
	 * writes through a mapping fault in again; confirm.
	 */
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	/* Only an EX holder can have dirtied this glock */
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

D
David Teigland 已提交
199 200 201 202
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO flags; DIO_METADATA selects metadata invalidation
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	/* The AIL must already be empty before we invalidate */
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Force a re-read of the dinode on next use */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* The rindex is special: flush the log and mark it out of date */
	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	/* Drop cached data pages for regular files */
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

241
static int inode_go_demote_ok(const struct gfs2_glock *gl)
D
David Teigland 已提交
242 243
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
244 245
	struct gfs2_holder *gh;

246 247
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
248 249 250 251 252 253 254

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

255
	return 1;
D
David Teigland 已提交
256 257
}

258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if (inode->i_nlink == nlink || inode->i_nlink == 0)
		return;

	if (nlink == 0)
		clear_nlink(inode);
	else
		inode->i_nlink = nlink;
}

/**
 * gfs2_dinode_in - Read a dinode from disk format into the incore inode
 * @ip: The GFS2 inode to populate
 * @buf: The on-disk dinode (struct gfs2_dinode, big-endian fields)
 *
 * Fix: removed a stray ';' after the switch statement's closing brace
 * (a leftover empty statement).
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent with @ip
 */

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	/* The buffer must describe the same inode we were asked about */
	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	/* Only move atime forwards; an older on-disk atime is ignored */
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int ret = gfs2_meta_inode_buffer(ip, &dibh);

	if (ret)
		return ret;

	/* The buffer must carry a dinode metatype header */
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh,
				GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	ret = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return ret;
}

D
David Teigland 已提交
372 373 374 375 376 377 378 379 380 381 382
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
383
	struct gfs2_sbd *sdp = gl->gl_sbd;
384
	struct gfs2_inode *ip = gl->gl_object;
D
David Teigland 已提交
385 386
	int error = 0;

387
	if (!ip || (gh->gh_flags & GL_SKIP))
D
David Teigland 已提交
388 389
		return 0;

390
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
D
David Teigland 已提交
391 392 393 394 395
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

396
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
D
David Teigland 已提交
397
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
398 399 400 401 402 403 404 405
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}
D
David Teigland 已提交
406 407 408 409

	return error;
}

410 411 412 413 414 415 416 417 418 419 420 421 422
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
S
Steven Whitehouse 已提交
423
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
424 425
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
426 427
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
S
Steven Whitehouse 已提交
428
		  (unsigned long long)i_size_read(&ip->i_inode));
429 430 431
	return 0;
}

D
David Teigland 已提交
432 433 434 435 436 437 438 439 440 441 442
/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
443
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
D
David Teigland 已提交
444 445 446 447 448 449 450 451 452 453 454 455
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gl: the glock
 * @flags:
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
456
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
D
David Teigland 已提交
457 458 459
}

/**
460
 * trans_go_sync - promote/demote the transaction glock
D
David Teigland 已提交
461 462 463 464 465 466
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

467
static void trans_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder (unused here)
 *
 * If the journal is live, invalidate the journal glock's metadata,
 * re-read the log head and re-initialize the log pointers from it.
 *
 * Returns: 0
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop any cached journal metadata before re-reading */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		/* The journal must have been unmounted cleanly */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

510 511 512 513 514 515 516 517 518 519 520 521
/**
 * trans_go_demote_ok - Check whether the transaction glock may be demoted
 * @gl: the glock
 *
 * Always returns 0, so demotion of a cached transaction glock is
 * never allowed.
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

522 523 524 525 526 527 528 529 530
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* No deletion work is queued on read-only mounts */
	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/*
		 * Take a glock reference for the queued work;
		 * queue_work() returns 0 when the work item was already
		 * queued, in which case the reference is dropped again.
		 */
		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put_nolock(gl);
	}
}

544
/* Glock operation vectors, one per glock type (LM_TYPE_*). */

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,	/* glock has its own address space */
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_ASPACE,	/* glock has its own address space */
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

/* Lookup table from lock type to its operations vector. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};