/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
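	/*
	 * Walk the AIL buffers attached to this glock and add a revoke for
	 * each one so that journal recovery will not replay it.  The log
	 * lock is dropped around gfs2_trans_add_revoke(), since adding the
	 * revoke takes the log lock itself.
	 */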
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		gfs2_remove_from_ail(NULL, bd);
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_inode.i_mode))
		return;

	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

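	/*
	 * If any pages were mapped for writing (GIF_SW_PAGED), the inode may
	 * have been dirtied through those mappings, so mark the glock dirty
	 * to force a sync before it is released.
	 */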
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

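	/*
	 * Flush the journal first so everything for this glock is safely in
	 * the log, then sync the in-place metadata buffers, and finally
	 * empty the AIL (issuing revokes) before the lock is dropped.
	 */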
	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

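	/*
	 * Ordered-data pages are written out before the log flush; jdata
	 * pages go through the journal, so their writeback is started after
	 * the log has been flushed instead.
	 */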
	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (ip && !gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip && gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			mapping_set_error(mapping, error);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
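		/*
		 * Read the inode's disk block so its metadata is cached as
		 * soon as the lock is granted, unless the holder requested
		 * GL_SKIP.
		 */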
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock (unused for too
 * long) is being purged from our node's glock cache; we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

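	/*
	 * An unused glock with no cached pages can be dropped at once;
	 * otherwise only drop it when local caching is disabled and the
	 * glock has been idle for at least gt_demote_secs.
	 */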
	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

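	/*
	 * If a truncate was left in progress (for example after a crash on
	 * another node), resume it now that the inode is held exclusively.
	 */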
	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 *		     process
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
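	/* Read the resource group header and bitmap blocks into memory. */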
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gl: the glock
 * @flags:
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
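		/*
		 * The log was shut down when the transaction glock was
		 * demoted (see trans_go_xmote_th), so invalidate the cached
		 * journal metadata, check that the on-disk head carries the
		 * UNMOUNT flag, and restart the log from that head.
		 */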
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log head from the journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
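	/* Hold on to the glock while its LVB is still referenced. */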
	return !atomic_read(&gl->gl_lvb_count);
}

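/*
 * The tables below bind the callbacks above to each glock type; any
 * operation a type leaves unset is skipped by the glock core.
 */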
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 10,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 10,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};