/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

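/* mig cookies are handed out in sequence and never reuse 0: a zero
 * cookie marks the dummy lock used to carry a bare mastery reference
 * (see dlm_is_dummy_lock below) */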
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot=0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
			"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

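		/* sleep until kicked by dlm_kick_recovery_thread() or
		 * the timeout expires, then recheck for dead nodes */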
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if the node has already been recovered,
 * i.e. is no longer in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master to keep
			 * a new recovery for another dead node from
			 * starting before this one is done; that could
			 * leave recovery hung. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
	 		 * just send a finalize message to everyone and
	 		 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

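	/* build a node_data entry for every node left in the domain
	 * map; the dead node has already been removed from it */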
	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
		  "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack: a bogus -ENOMEM makes the recovery
		 * master treat this as a transient failure and retry */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
 	 * dlm->reco.resources list.  now move items from that list
 	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
					  "recovery data!\n",
					  ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
				       	u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					/* Can't schedule DLM_UNLOCK_FREE_LOCK
					 * - do manually */
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
				  "doing recovery for node %u. sending it.\n",
				  dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
				  "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

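	/* the granted, converting and blocked queues are laid out
	 * back-to-back in the lockres, so queue++ walks all three
	 * (see dlm_list_num_to_pointer below) */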
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				      struct dlm_migratable_lockres *mres,
				      u8 send_to,
				      struct dlm_lock_resource *res,
				      int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;


	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
	       return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

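/* a dummy lock (cookie 0, all types IVMODE, on the blocked list)
 * carries nothing but a mastery reference, telling the new master
 * that this node still holds a ref on an otherwise lockless lockres */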
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
			  "require more than one network packet to "
			  "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
			hash);
	if (res) {
	 	/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(0, "%s: node is attempting to migrate "
				"lockres %.*s, but marked as dropping "
				" ref!\n", dlm->name,
				mres->lockname_len, mres->lockname);
			ret = -EINVAL;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			dlm_lockres_put(res);
			goto leave;
		}

		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	} else {
		spin_unlock(&dlm->spinlock);
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
				  ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
				   "this node will take it.\n",
				   res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
				  "that node %u still owns %.*s\n",
				  real_master, res->lockname.len,
				  res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}



static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
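
/* Requery outcomes, restating the comment above:
 *   - every node answers UNKNOWN: nobody claims the lockres, so the
 *     recovery master remasters it here;
 *   - some node answers with a valid owner: the migration completed
 *     far enough that a surviving node owns it, so the local copy is
 *     dumped and the owner re-asserts itself to the sender. */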


int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

resend:
	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else if (status == -ENOMEM) {
		mlog_errno(status);
		msleep(50);
		goto resend;
	} else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
			  nodenum, *real_master);
		ret = 0;
	}
	return ret;
}


/* this handler cannot fail, so unless the sending
 * or receiving of the message failed, the owner it
 * reports can be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;
	int dispatched = 0;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(ret);
				spin_unlock(&res->spinlock);
				dlm_lockres_put(res);
				spin_unlock(&dlm->spinlock);
				dlm_put(dlm);
				/* sender will take care of this and retry */
				return ret;
			} else {
				dispatched = 1;
				__dlm_lockres_grab_inflight_worker(dlm, res);
				spin_unlock(&res->spinlock);
			}
		} else {
			/* put.. in case we are not the master */
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
		}
	}
	spin_unlock(&dlm->spinlock);

	if (!dispatched)
		dlm_put(dlm);
	return master;
}
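
/* Refcount note: when the assert master work is dispatched above, the
 * dlm ref taken by dlm_grab() and the lockres refs (the lookup ref and
 * the inflight worker ref) travel with the work item and are dropped
 * by the worker when it finishes; only in the non-dispatched paths
 * does this handler drop its own dlm ref before returning. */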

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
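
/*
 * A minimal sketch (not built) of the struct-layout assumption behind
 * dlm_list_num_to_pointer(): it only works because granted, converting
 * and blocked are declared back-to-back in struct dlm_lock_resource,
 * so list numbers 0/1/2 step through them by pointer arithmetic.
 */
#if 0
static void dlm_list_num_example(struct dlm_lock_resource *res)
{
	BUG_ON(dlm_list_num_to_pointer(res, 0) != &res->granted);
	BUG_ON(dlm_list_num_to_pointer(res, 1) != &res->converting);
	BUG_ON(dlm_list_num_to_pointer(res, 2) != &res->blocked);
}
#endif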
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/
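
/*
 * Hedged sketch (not built) of the spin-and-recheck pattern the NOTE
 * above prescribes for a request that raced with migration; real
 * callers sleep on res->wq via __dlm_wait_on_lockres_flags() rather
 * than poll.
 */
#if 0
static void dlm_wait_migration_example(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	/* block until the master has finished handing the lockres off */
	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_MIGRATING);
	/* MIGRATING is clear: res->owner may have changed, so recheck
	 * it before resending the convert/cancel/unlock */
	spin_unlock(&res->spinlock);
}
#endif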

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
					       "with cookie %u:%llu, node %u, "
					       "list %u, flags 0x%x, type %d, "
					       "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		/*
		 * If the lock is in the blocked list it can't have a valid lvb,
		 * so skip it
		 */
		if (ml->list == DLM_BLOCKED_LIST)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			if (mres->flags & DLM_MRES_RECOVERY &&
					ml->list == DLM_CONVERTING_LIST &&
					newlock->ml.type >
					newlock->ml.convert_type) {
				/* newlock is doing downconvert, add it to the
				 * head of converting list */
				list_add(&newlock->list, queue);
			} else
				list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
			added++;
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master.  */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}
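
/* Summary of the pending-state handling above:
 *
 *   convert_pending (converting list) -> revert to the granted mode
 *   lock_pending    (blocked list)    -> drop the request entirely
 *   unlock_pending  (granted list)    -> treat the unlock as complete
 *   cancel_pending  (converting list) -> treat the cancel as complete
 *
 * either way, the lock state shipped to the new master looks as if the
 * in-flight operation had fully completed or had never been started. */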



/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
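
/* The helper above encodes a small truth table:
 *
 *   local != 0 (our own lock on a non-mastered lockres):
 *       invalidate unless we hold EX or PR, since only those modes
 *       guarantee our cached lvb is current.
 *   local == 0 (the dead node's lock on a lockres we master):
 *       invalidate only if the dead node held EX, since it may have
 *       changed the lvb without anyone else seeing the update. */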

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put(). One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if (!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;


	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						/* Can't schedule
						 * DLM_UNLOCK_FREE_LOCK
						 * - do manually */
						dlm_lock_put(lock);
						break;
					}
				}
				dlm_lockres_clear_refmap_bit(dlm, res,
						dead_node);
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(0, "%s:%.*s: owned by "
						"dead node %u, this node was "
						"dropping its ref when it died. "
						"continue, dropping the flag.\n",
						dlm->name, res->lockname.len,
						res->lockname.name, dead_node);
				}
				res->state &= ~DLM_LOCK_RES_DROPPING_REF;
				dlm_move_lockres_to_recovery_list(dlm,
						res);
			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
				if (test_bit(dead_node, res->refmap)) {
					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
						"no locks and had not purged before dying\n",
						dlm->name, res->lockname.len,
						res->lockname.name, dead_node);
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				}
			}
			spin_unlock(&res->spinlock);
		}
	}

}

static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
					 dlm_reco_master_ready(dlm),
					 msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
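
/* Outcome map for the $RECOVERY dlmlock() race above:
 *
 *   DLM_NORMAL     got the EX; send begin_reco and become reco master
 *                  (unless another node was announced first: -EEXIST)
 *   DLM_NOTQUEUED  another node holds the EX; wait for it to announce
 *                  itself as reco.new_master, then return -EEXIST
 *   DLM_RECOVERING the lock master died mid-request; start over
 *   anything else  unexpected; dump the lockres and BUG()
 */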

static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
				  "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
			  nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
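
/* Finalize is a two-phase broadcast: stage 1 tells every node to switch
 * the recovered lockres owners over to the new master and to enter the
 * FINALIZE state; stage 2 (the DLM_FINALIZE_STAGE2 flag) tells them
 * recovery is fully done and it is safe to clear FINALIZE and reset
 * reco.dead_node/new_master. */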

int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
		case 1:
			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
				mlog(ML_ERROR, "%s: received finalize1 from "
				     "new master %u for dead node %u, but "
				     "this node has already received it!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			break;
		case 2:
			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
				mlog(ML_ERROR, "%s: received finalize2 from "
				     "new master %u for dead node %u, but "
				     "this node did not have finalize1!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
			spin_unlock(&dlm->spinlock);
			dlm_kick_recovery_thread(dlm);
			break;
		default:
			BUG();
	}
	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}