/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

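/*
 * glock_wake_function - wake a waiter, but only if it is waiting on the
 * glock name that the wake-up was issued for (passed in @key)
 */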
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
430 431 432 433 434 435 436 437
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
						   &sdp->sd_flags));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

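/*
 * delete_work_func - look up the inode backing an iopen glock by block
 * number and prune its dcache aliases, so that an inode unlinked on a
 * remote node can be evicted and freed locally
 */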
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

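/*
 * glock_work_func - run the glock state machine from the glock workqueue:
 * handle a pending DLM reply, apply a delayed demote request once the
 * minimum hold time has expired, and process the queue of waiting holders
 */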
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

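/*
 * find_insert_glock - look up a glock by name in the rhashtable and take a
 * reference, optionally inserting @new if no entry exists; waits for dying
 * glocks to disappear from the table before retrying
 */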
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

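/*
 * gfs2_glock_cb - handle a demote request from the lock module, delaying
 * demotes of recently used inode glocks until their minimum hold time has
 * expired
 */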
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

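/*
 * glock_cmp - list_sort() comparison function, ordering glocks by lock
 * number (and hence by the disk location of their inodes)
 */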
static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * examine_bucket - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		gl = ERR_PTR(rhashtable_walk_start(&iter));
		if (IS_ERR(gl))
			goto walk_stop;

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

walk_stop:
		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
1765
 * gfs2_dump_glock - print information about a glock
1766
 * @seq: The seq_file struct
D
David Teigland 已提交
1767
 * @gl: the glock
1768 1769 1770 1771 1772 1773 1774 1775 1776 1777
 *
 * The file format is as follows:
 * One line per object; capital letters indicate the object type:
 * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which
 * are enclosed in [] so that it's possible to see whether they are
 * composed of spaces. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */
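
/*
 * A made-up sample of the format described above (all values are
 * illustrative, not taken from a real dump):
 *
 *   G:  s:SH n:2/2ba1 f:yL t:SH d:EX/0 a:0 v:0 r:3 m:10
 *    H: s:SH f:EH e:0 p:2398 [cat] gfs2_inode_lookup+0x11c/0x240
 */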

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, gl),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_revokes),
		  (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;	/* row: entry in gfs2_gltype[] */
	unsigned subindex = pos & 0x07;	/* stat within the row: entry in gfs2_stype[] */
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}
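
/*
 * Illustrative shape of the resulting output (numbers invented, spacing
 * approximate): one line per (glock type, statistic) pair, one column
 * per possible CPU:
 *
 *   type          cpu:               0               1
 *   inode        srtt:            1245             980
 *   inode     srttvar:             212             167
 *   ...
 */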

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
		if (IS_ERR(gi->gl)) {
			if (PTR_ERR(gi->gl) == -EAGAIN)
				continue;
			gi->gl = NULL;
			return;
		}
		/* Skip entries for other sb and dead entries */
		if (gi->sdp == gi->gl->gl_name.ln_sbd &&
		    !__lockref_is_dead(&gi->gl->gl_lockref))
			return;
	}
}
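
/*
 * Added commentary: rhashtable_walk_next() returns ERR_PTR(-EAGAIN)
 * when the table is resized in the middle of a walk; the loop above
 * simply retries, so a dump taken during a resize is best-effort and
 * may show an entry more than once.
 */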

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	if (rhashtable_walk_start(&gi->hti) != 0)
		return NULL;

	do {
		gfs2_glock_iter_next(gi);
	} while (gi->gl && n--);

	gi->last_pos = *pos;

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi);

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	rhashtable_walk_stop(&gi->hti);
	rhashtable_walk_exit(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

/*
 * Pre-size the seq_file output buffer for the glock debugfs files so
 * that large dumps can be produced without repeatedly enlarging the
 * default one-page buffer; capped at 64KiB.
 */
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;  /* sdp */
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open	 = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	struct dentry *dent;

	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dir = dent;

	dent = debugfs_create_file("glocks",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glocks_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glocks = dent;

	dent = debugfs_create_file("glstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glstats = dent;

	dent = debugfs_create_file("sbstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_sbstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_sbstats = dent;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return dent ? PTR_ERR(dent) : -ENOMEM;
}
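
/*
 * Illustrative usage (assumes debugfs is mounted at the conventional
 * /sys/kernel/debug and <fsname> is the filesystem's table name):
 *
 *   # cat /sys/kernel/debug/gfs2/<fsname>/glocks
 *   # cat /sys/kernel/debug/gfs2/<fsname>/glstats
 *   # cat /sys/kernel/debug/gfs2/<fsname>/sbstats
 */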

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	if (IS_ERR(gfs2_root))
		return PTR_ERR(gfs2_root);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}