/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

33 34 35 36 37 38 39 40 41 42 43
/* Report an AIL buffer found in an unexpected state (dirty, pinned or
 * locked) and withdraw from the filesystem, since the AIL invariants
 * have been violated. */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

44
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: maximum number of revokes to add before stopping
 *
 * None of the buffers should be dirty, locked, or pinned.
 * Caller must have already reserved log space for @nr_revokes revokes.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* buffer states that must not appear on the AIL at this point */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			/* fsync may legitimately race with in-flight I/O;
			 * anywhere else a busy buffer is a bug */
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* unless called from fsync, everything must have been revoked */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


/* Empty the glock's AIL by revoking every buffer on it, using a
 * transaction built on the stack rather than via gfs2_trans_begin(). */
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
         * tr->alloced is not set since the transaction structure is
         * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	/* no transaction may already be in progress on this task */
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
109

110
/* Flush the AIL for @gl from the fsync/demote paths, sizing the revoke
 * reservation up from the current AIL count before starting a
 * transaction. */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	/* revokes that fit in the first log descriptor block */
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* each additional log block holds (bsize - header)/8 more revokes */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
S
Steven Whitehouse 已提交
130 131

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	/* write out and wait on only this rgrp's slice of sd_aspace */
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	/* all revokes must have been processed before we drop the pages */
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		/* force a re-read of the rgrp header next time it is used */
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

S
Steven Whitehouse 已提交
189 190 191 192 193 194 195 196 197
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	/* only regular files have data pages to deal with below */
	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	/* start metadata writeback, then data, then wait on both */
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

D
David Teigland 已提交
232 233 234 235
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	/* all revokes must have been processed before invalidation */
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* force a dinode re-read on the next inode_go_lock */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* invalidating the rindex inode means the rgrp index is stale */
	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

274
static int inode_go_demote_ok(const struct gfs2_glock *gl)
D
David Teigland 已提交
275 276
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
277 278
	struct gfs2_holder *gh;

279 280
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
281 282 283 284 285 286 287

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

288
	return 1;
D
David Teigland 已提交
289 290
}

291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if (inode->i_nlink == nlink || inode->i_nlink == 0)
		return;		/* unchanged, or already pinned at zero */

	if (nlink == 0)
		clear_nlink(inode);
	else
		set_nlink(inode, nlink);
}

/**
 * gfs2_dinode_in - Populate the in-core inode from an on-disk dinode
 * @ip: The in-core GFS2 inode
 * @buf: The on-disk dinode image (struct gfs2_dinode)
 *
 * Returns: 0 on success, or -EIO (after gfs2_consist_inode) if the
 * dinode's block number, height or depth is inconsistent.
 */

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	default:	/* non-device inodes keep i_rdev == 0 */
		break;
	}	/* note: stray ';' after this brace removed */

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	/* atime only ever moves forwards; the in-core copy may be newer */
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	/* Only mark the inode valid when the dinode was read in cleanly;
	 * previously GIF_INVALID was cleared even when gfs2_dinode_in()
	 * found the dinode corrupt, so a bad inode could be treated as
	 * up to date by later inode_go_lock() calls. */
	if (!error)
		clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

D
David Teigland 已提交
400 401 402 403 404 405 406 407 408 409 410
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
411
	struct gfs2_sbd *sdp = gl->gl_sbd;
412
	struct gfs2_inode *ip = gl->gl_object;
D
David Teigland 已提交
413 414
	int error = 0;

415
	if (!ip || (gh->gh_flags & GL_SKIP))
D
David Teigland 已提交
416 417
		return 0;

418
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
D
David Teigland 已提交
419 420 421 422 423
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

424 425 426
	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

427
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
D
David Teigland 已提交
428
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
429 430 431 432 433 434 435 436
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}
D
David Teigland 已提交
437 438 439 440

	return error;
}

441 442 443 444 445 446 447
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose inode is to be printed
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
}

D
David Teigland 已提交
461
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Freeze glock held SHARED with a live journal: freeze the local
	   super block and kick the freeze worker. */
	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			/* a failed freeze leaves us inconsistent: withdraw */
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder (unused here)
 *
 * Returns: 0
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		/* the journal must have been left cleanly unmounted */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

519 520 521 522 523 524 525
/**
 * freeze_go_demote_ok - Check whether the freeze glock may be demoted
 * @gl: the glock
 *
 * Always returns 0
 * (The header previously named this "trans_go_demote_ok", a leftover
 * from before the glock was renamed.)
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

531 532 533 534 535 536
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this callback came from another node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* only remote callbacks matter, and never on a read-only mount */
	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* take a ref for the delete work; drop it again if the
		   work was already queued */
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

553
/* Glock operation vectors, one per lock type. */

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

608 609 610 611 612 613 614 615 616 617 618
/* Lookup table from LM_TYPE_* number to its operation vector. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};