/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"

static int			ls_count;
static struct mutex		ls_lock;
static struct list_head		lslist;
static spinlock_t		lslist_lock;
static struct task_struct *	scand_task;


static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n = simple_strtol(buf, NULL, 0);

	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}

static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
	return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);
	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
	.attr  = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
	.attr  = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
	.attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show  = dlm_id_show,
	.store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
	.attr  = {.name = "recover_status", .mode = S_IRUGO},
	.show  = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show  = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};
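
/*
 * Note: these attributes show up under /sys/kernel/dlm/<lockspace name>/
 * (the "dlm" kset created in dlm_lockspace_init() below).  They form the
 * sysfs side of the handshake with the userland daemon: dlm_controld
 * writes "id", "control" and "event_done", and can read the recovery
 * state via "recover_status" and "recover_nodeid".
 */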

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls  = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}

static struct sysfs_ops dlm_attr_ops = {
	.show  = dlm_attr_show,
	.store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops     = &dlm_attr_ops,
	.release       = lockspace_kobj_release,
};

static struct kset *dlm_kset;

static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

	if (error)
		goto out;

	error = ls->ls_uevent_result;
 out:
	if (error)
		log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
			  error, ls->ls_uevent_result);
	return error;
}
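
/*
 * Illustrative sequence (not part of this file): after dlm_controld has
 * handled the ONLINE/OFFLINE uevent and finished the group join/leave,
 * it reports the result through sysfs, e.g. from userspace:
 *
 *	echo 0 > /sys/kernel/dlm/<name>/event_done
 *
 * which ends up in dlm_event_store() above, sets LSFL_UEVENT_WAIT and
 * wakes the wait_event_interruptible() in do_uevent().
 */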


int __init dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: can not create kset\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}

static struct dlm_ls *find_ls_to_scan(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (time_after_eq(jiffies, ls->ls_scan_time +
					    dlm_config.ci_scan_secs * HZ)) {
			spin_unlock(&lslist_lock);
			return ls;
		}
	}
	spin_unlock(&lslist_lock);
	return NULL;
}
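
/*
 * dlm_scand periodically picks a lockspace whose scan interval
 * (ci_scan_secs) has expired and, if it can take the recovery lock,
 * calls dlm_scan_rsbs() to reclaim unused rsbs and dlm_scan_timeout()
 * to check for locks that have exceeded their timeout.
 */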

static int dlm_scand(void *data)
{
	struct dlm_ls *ls;
	int timeout_jiffies = dlm_config.ci_scan_secs * HZ;

	while (!kthread_should_stop()) {
		ls = find_ls_to_scan();
		if (ls) {
			if (dlm_lock_recovery_try(ls)) {
				ls->ls_scan_time = jiffies;
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			} else {
				ls->ls_scan_time += HZ;
			}
		} else {
			schedule_timeout_interruptible(timeout_jiffies);
		}
	}
	return 0;
}

static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}

static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}
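
/*
 * The dlm_find_lockspace_*() routines below take a reference on the
 * lockspace (ls_count, protected by lslist_lock); callers drop it with
 * dlm_put_lockspace().  remove_lockspace() waits for ls_count to reach
 * zero before taking the lockspace off lslist.
 */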

struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}

static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			WARN_ON(ls->ls_create_count != 0);
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}

static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}

static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
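
/*
 * threads_start()/threads_stop() bracket the lifetime of the global dlm
 * threads: dlm_new_lockspace() starts them when the first lockspace is
 * created and dlm_release_lockspace() stops them when the last one goes
 * away (tracked by ls_count under ls_lock).
 */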

static int new_lockspace(char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	if (!dlm_user_daemon_available()) {
		module_put(THIS_MODULE);
		return -EUNATCH;
	}

	error = 0;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		WARN_ON(ls->ls_create_count <= 0);
		if (ls->ls_namelen != namelen)
			continue;
		if (memcmp(ls->ls_name, name, namelen))
			continue;
		if (flags & DLM_LSFL_NEWEXCL) {
			error = -EEXIST;
			break;
		}
		ls->ls_create_count++;
		module_put(THIS_MODULE);
		error = 1; /* not an error, return 0 */
		break;
	}
	spin_unlock(&lslist_lock);

	if (error < 0)
		goto out;
	if (error)
		goto ret_zero;

	error = -ENOMEM;

	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;
	ls->ls_scan_time = jiffies;

	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);

	if (flags & DLM_LSFL_FS)
		ls->ls_allocation = GFP_NOFS;
	else
		ls->ls_allocation = GFP_KERNEL;

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have some flags set */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
				    DLM_LSFL_NEWEXCL));

	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		spin_lock_init(&ls->ls_rsbtbl[i].lock);
	}

	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		rwlock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_members_done);
	ls->ls_members_result = -1;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	ls->ls_create_count = 1;
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	ls->ls_kobj.kset = dlm_kset;
	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
				     "%s", ls->ls_name);
	if (error)
		goto out_stop;
	kobject_uevent(&ls->ls_kobj, KOBJ_ADD);

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */

	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	wait_for_completion(&ls->ls_members_done);
	error = ls->ls_members_result;
	if (error)
		goto out_members;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");
 ret_zero:
	*lockspace = ls;
	return 0;

 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_put(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}

int dlm_new_lockspace(char *name, int namelen, void **lockspace,
		      uint32_t flags, int lvblen)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
	if (!error)
		ls_count++;
	else if (!ls_count)
		threads_stop();
 out:
	mutex_unlock(&ls_lock);
	return error;
}

/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}

static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i, busy, rv;

	busy = lockspace_busy(ls);

	spin_lock(&lslist_lock);
	if (ls->ls_create_count == 1) {
		if (busy > force)
			rv = -EBUSY;
		else {
			/* remove_lockspace takes ls off lslist */
			ls->ls_create_count = 0;
			rv = 0;
		}
	} else if (ls->ls_create_count > 1) {
		rv = --ls->ls_create_count;
	} else {
		rv = -EINVAL;
	}
	spin_unlock(&lslist_lock);

	if (rv) {
		log_debug(ls, "release_lockspace no remove %d", rv);
		return rv;
	}

	dlm_device_deregister(ls);

	if (force < 3 && dlm_user_daemon_available())
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				dlm_free_lvb(lkb->lkb_lvbptr);

			dlm_free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	log_debug(ls, "release_lockspace final free");
	kobject_put(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with it */

	module_put(THIS_MODULE);
	return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	error = release_lockspace(ls, force);
	if (!error)
		ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	return error;
}
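
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * kernel user such as a cluster filesystem might create and release a
 * lockspace roughly like this, assuming the name "example" and a
 * 32-byte LVB:
 *
 *	dlm_lockspace_t *ls;
 *	int error;
 *
 *	error = dlm_new_lockspace("example", strlen("example"), &ls,
 *				  DLM_LSFL_FS, 32);
 *	if (error)
 *		return error;
 *	...
 *	dlm_release_lockspace(ls, 0);
 */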

void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;

 restart:
	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
			continue;
		spin_unlock(&lslist_lock);
		log_error(ls, "no userland control daemon, stopping lockspace");
		dlm_ls_stop(ls);
		goto restart;
	}
	spin_unlock(&lslist_lock);
}