glops.c 19.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
D
David Teigland 已提交
2 3
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
D
David Teigland 已提交
5 6 7 8 9
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
10
#include <linux/gfs2_ondisk.h>
11
#include <linux/bio.h>
12
#include <linux/posix_acl.h>
13
#include <linux/security.h>
D
David Teigland 已提交
14 15

#include "gfs2.h"
16
#include "incore.h"
D
David Teigland 已提交
17 18 19 20 21 22 23 24
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
25
#include "util.h"
26
#include "trans.h"
27
#include "dir.h"
A
Abhi Das 已提交
28
#include "lops.h"
D
David Teigland 已提交
29

30 31
struct workqueue_struct *gfs2_freeze_wq;

32 33
extern struct workqueue_struct *gfs2_control_wq;

34 35
/* Report an unexpected dirty/pinned/locked buffer found on a glock's AIL
 * list and withdraw the filesystem (this is treated as unrecoverable). */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	/* The buffer state is inconsistent; give up on the whole fs. */
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

48
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: maximum number of revokes to queue in this pass
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* Any of these bits on a buffer means it cannot be revoked yet. */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	/* Lock order: log lock first, then the AIL spinlock. */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			/* From fsync, unclean buffers are expected: skip them.
			 * Otherwise this is a hard error. */
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* Outside of fsync, the AIL must now be empty for this glock. */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


85
/* Revoke everything on this glock's AIL list and flush the log, so that
 * the glock can be released with no outstanding journaled metadata.
 * Returns 0 on success or a negative errno from log reservation. */
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/* A shortened, inline version of gfs2_trans_begin()
         * tr->alloced is not set since the transaction structure is
         * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	tr.tr_ip = _RET_IP_;
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	/* No transaction may already be active on this task. */
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}
142

143
/* Flush the AIL for one glock inside a real transaction.  Called on the
 * fsync path (@fsync true) and on glock sync; silently returns if the
 * transaction cannot be started or there is nothing to revoke. */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	/* Revokes that fit in the first (descriptor) log block. */
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* Grow the budget one continuation (meta header) block at a time
	 * until it covers everything currently on the AIL. */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
S
Steven Whitehouse 已提交
164 165

/**
S
Steven Whitehouse 已提交
166
 * rgrp_go_sync - sync out the metadata for this glock
D
David Teigland 已提交
167 168 169 170
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
171
 * return to caller to demote/unlock the glock until I/O is complete.
D
David Teigland 已提交
172 173
 */

174
static int rgrp_go_sync(struct gfs2_glock *gl)
D
David Teigland 已提交
175
{
176
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
177
	struct address_space *mapping = &sdp->sd_aspace;
178
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
S
Steven Whitehouse 已提交
179 180 181
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
182
		return 0;
183
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
S
Steven Whitehouse 已提交
184

185 186
	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
187 188
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
189
	WARN_ON_ONCE(error);
190
	mapping_set_error(mapping, error);
191 192
	if (!error)
		error = gfs2_ail_empty_gl(gl);
193

A
Andreas Gruenbacher 已提交
194
	spin_lock(&gl->gl_lockref.lock);
195 196 197
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
A
Andreas Gruenbacher 已提交
198
	spin_unlock(&gl->gl_lockref.lock);
199
	return error;
D
David Teigland 已提交
200 201 202
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags; DIO_METADATA is expected to be set
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	/* Drop the cached rgrp buffer_heads before truncating the pages
	 * that back them. */
	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	/* Force a re-read of the rgrp header on next use. */
	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

228 229 230 231 232 233 234 235 236 237 238 239
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

240 241 242 243 244 245 246 247 248 249 250
/**
 * gfs2_glock2rgrp - safely fetch the resource group attached to a glock
 * @gl: the glock
 *
 * Returns the rgrp currently set as gl_object (may be NULL); the read
 * is performed under gl_lockref.lock.
 */
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgrp;

	spin_lock(&gl->gl_lockref.lock);
	rgrp = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);
	return rgrp;
}

251 252 253 254 255 256 257 258 259
/* Clear GIF_GLOP_PENDING (set by gfs2_glock2inode()) and wake anyone
 * waiting on that bit.  Safe to call with @ip == NULL. */
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (ip) {
		/* clear_bit_unlock() provides release ordering before the wakeup. */
		clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
		wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
	}
}

S
Steven Whitehouse 已提交
260 261 262 263 264 265
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success, or the first data-writeback error encountered.
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0;

	if (isreg) {
		/* Tear down writable mmap mappings and wait for direct I/O. */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	/* Start metadata writeback, then data, then wait on both. */
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	/* NOTE: a metadata wait error overwrites any earlier data error. */
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

D
David Teigland 已提交
307 308 309 310
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags: DIO_METADATA when cached metadata must be dropped
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Force a dinode re-read and drop derived caches. */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* Invalidating the rindex inode means the cached rgrp index is stale;
	 * flush the log before marking it so. */
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

352
static int inode_go_demote_ok(const struct gfs2_glock *gl)
D
David Teigland 已提交
353
{
354
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
355

356 357
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
358

359
	return 1;
D
David Teigland 已提交
360 361
}

362 363 364
/* Unmarshal an on-disk (big-endian) dinode at @buf into the in-core
 * inode @ip.  Returns 0, or -EIO (after gfs2_consist_inode) when the
 * block number, height, or depth fields are inconsistent. */
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	/* Only move atime forward; the in-core value may be newer. */
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Re-reads the on-disk dinode block and updates the in-core inode
 * from it, then clears GIF_INVALID.
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	int err;

	err = gfs2_meta_inode_buffer(ip, &bh);
	if (err)
		return err;

	err = gfs2_dinode_in(ip, bh->b_data);
	brelse(bh);
	/* NOTE(review): GIF_INVALID is cleared even when gfs2_dinode_in()
	 * failed above; preserved from the original behavior. */
	clear_bit(GIF_INVALID, &ip->i_flags);
	return err;
}

D
David Teigland 已提交
445 446 447 448 449 450 451 452 453 454 455
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder whose glock was just acquired
 *
 * Returns: errno, or 1 when an interrupted truncate was handed off to
 * the quota daemon for completion
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	/* Re-read the dinode if it was invalidated while unlocked. */
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	/* A truncate was interrupted (e.g. by a crash); queue the inode so
	 * the quota daemon finishes it, and wake that daemon. */
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

486 487 488 489
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @ip: the inode
490
 * @fs_id_buf: file system id (may be empty)
491 492 493
 *
 */

494 495
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
496
{
497 498 499 500
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

501
	if (ip == NULL)
502
		return;
503 504 505 506 507

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

508 509
	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
510 511
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
512 513
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
514
		  (unsigned long long)i_size_read(inode), nrpages);
515 516
}

D
David Teigland 已提交
517
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 * Returns: 0 (errors are handled by withdrawing the filesystem)
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Holding the freeze glock SH with a live journal means a freeze
	 * request is in progress; kick off freezing this node. */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			/* Failure to freeze without a prior withdraw is fatal. */
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder (unused here)
 *
 * Returns: 0 (consistency failures are reported via gfs2_consist())
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop cached journal metadata, then locate the log head. */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		/* The journal must have been cleanly unmounted by the freeze. */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize some head of the log stuff  */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

582 583 584 585 586 587 588
/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0: the freeze glock is never demoted automatically.
 * (The old header comment misnamed this "trans_go_demote_ok".)
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

594 595 596 597
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Only remote demote requests on a writable mount matter here. */
	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* Hold a reference for the queued delete work; drop it
		 * again if the work was already queued. */
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682
/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* gl_object is already NULL at this point in the glock's lifecycle,
	 * so GLF_FREEING is the only indication that somebody is waiting. */
	if (test_bit(GLF_FREEING, &gl->gl_flags)) {
		clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
		wake_up_bit(&gl->gl_flags, GLF_FREEING);
	}
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

683
/* Glock operations vectors, one per glock type.  Callbacks left unset
 * fall back to the glock core's default behavior. */

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

/* Lookup table from lock type (LM_TYPE_*) to its operations vector. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};