proc.c 24.7 KB
Newer Older
1 2
/*-------------------------------------------------------------------------
 *
3
 * proc.c
4
 *	  routines to manage per-process shared memory data structure
5 6 7 8 9
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
10
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.50 1999/02/19 07:10:48 tgl Exp $
11 12 13 14
 *
 *-------------------------------------------------------------------------
 */
/*
15 16
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
17 18 19
 *
 *
 * Interface (a):
20 21 22
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
23 24 25 26 27 28 29 30 31 32
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
33
 *		associated with the process.
34 35
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
36 37 38 39 40 41
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
42 43
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
44 45 46 47
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
48
 *
49
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.50 1999/02/19 07:10:48 tgl Exp $
50 51 52 53
 */
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
54
#include <signal.h>
55
#include <sys/types.h>
M
Marc G. Fournier 已提交
56

B
Bruce Momjian 已提交
57
#if defined(solaris_sparc)
58 59 60 61
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

M
Marc G. Fournier 已提交
62
#include "postgres.h"
63
#include "miscadmin.h"
64
#include "libpq/pqsignal.h"
65 66 67 68

#include "access/xact.h"
#include "utils/hsearch.h"

69 70 71
#include "storage/ipc.h"
/* In Ultrix, sem.h must be included after ipc.h */
#include <sys/sem.h>
72
#include "storage/buf.h"
73
#include "storage/lock.h"
B
Bruce Momjian 已提交
74
#include "storage/lmgr.h"
75 76 77
#include "storage/shmem.h"
#include "storage/spin.h"
#include "storage/proc.h"
M
 
Marc G. Fournier 已提交
78
#include "utils/trace.h"
79

B
Bruce Momjian 已提交
80
static void HandleDeadLock(int sig);
81
static PROC *ProcWakeup(PROC *proc, int errType);
82
static void ProcFreeAllSemaphores(void);
83

M
 
Marc G. Fournier 已提交
84 85
#define DeadlockCheckTimer pg_options[OPT_DEADLOCKTIMEOUT]

86 87 88 89 90 91 92
/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
93
SPINLOCK	ProcStructLock;
94 95 96 97 98

/*
 * For cleanup routines.  Don't cleanup if the initialization
 * has not happened.
 */
99
static bool ProcInitialized = FALSE;
100 101 102

static PROC_HDR *ProcGlobal = NULL;

103
PROC	   *MyProc = NULL;
104

105
static void ProcKill(int exitStatus, int pid);
106
static void ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum);
107
static void ProcFreeSem(IpcSemaphoreKey semKey, int semNum);
108 109 110

/*
 * InitProcGlobal -
111
 *	  initializes the global process table. We put it here so that
112
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
113 114 115
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
116 117 118 119 120 121 122 123 124 125 126
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
127 128
 */
void
InitProcGlobal(IPCKey key, int maxBackends)
{
	bool		found = false;

	/* attach to the free list (creates the PROC_HDR on first call) */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		/* empty free-PROC list; base key for per-process semaphores */
		ProcGlobal->freeProcs = INVALID_OFFSET;
		ProcGlobal->currKey = IPCGetProcessSemaphoreInitKey(key);
		/* clear the semaphore-allocation bitmap: nothing allocated yet */
		for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
			ProcGlobal->freeSemMap[i] = 0;

		/* Arrange to delete semas on exit --- set this up now so that
		 * we will clean up if pre-allocation fails...
		 */
		on_shmem_exit(ProcFreeAllSemaphores, NULL);

		/* Pre-create the semaphores for the first maxBackends processes,
		 * rounding up to whole sets of PROC_NSEMS_PER_SET semaphores.
		 * This way a misconfigured kernel semaphore limit is discovered
		 * at postmaster start rather than under load.
		 */
		for (i = 0;
			 i < (maxBackends+PROC_NSEMS_PER_SET-1) / PROC_NSEMS_PER_SET;
			 i++)
		{
			IPCKey		semKey = ProcGlobal->currKey + i;
			int			semId;
			int			semstat;

			semId = IpcSemaphoreCreate(semKey,
									   PROC_NSEMS_PER_SET,
									   IPCProtection,
									   IpcSemaphoreDefaultStartValue,
									   0,
									   &semstat);
			/* mark this sema set allocated (high bit of the map entry);
			 * individual semaphores within it are still free */
			ProcGlobal->freeSemMap[i] = (1 << PROC_NSEMS_PER_SET);
		}
	}
}

/* ------------------------
 * InitProc -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
	bool		found = false;
	int			semstat;
	unsigned long location,
				myOffset;

	/* ------------------
	 * Routine called if deadlock timer goes off. See ProcSleep()
	 * ------------------
	 */
	pqsignal(SIGALRM, HandleDeadLock);

	/* guard the shared ProcGlobal structure while we manipulate it */
	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(ERROR, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
		return;
	}

	/* try to get a proc from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		/* pop the head of the free list */
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc((unsigned) sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;

		/* grab a free semaphore slot from the shared bitmap */
		ProcGetNewSemKeyAndNum(&semKey, &semNum);

		/* Note: because of the pre-allocation done in InitProcGlobal,
		 * this call should always attach to an existing semaphore.
		 * It will (try to) create a new group of semaphores only if
		 * the postmaster tries to start more backends than it said it would.
		 */
		semId = IpcSemaphoreCreate(semKey,
								   PROC_NSEMS_PER_SET,
								   IPCProtection,
								   IpcSemaphoreDefaultStartValue,
								   0,
								   &semstat);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		/* take the semaphore down to 0 so our first ProcSleep blocks */
		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
		MyProc->sem.semId = -1;		/* standalone backend: no semaphore */

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = MyProcPid;
	MyProc->xid = InvalidTransactionId;
#ifdef LowLevelLocking
	MyProc->xmin = InvalidTransactionId;
#endif

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the shmem index table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
		elog(FATAL, "InitProc: ShmemPID table broken");

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	/* arrange for ProcKill to run and give back our PROC at exit */
	on_shmem_exit(ProcKill, (caddr_t) MyProcPid);

	ProcInitialized = TRUE;
}

/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
335 336 337
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
338 339 340 341
}

/*
 * ProcRemove -
342 343 344 345 346
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process. (We have to do
 *	  this is the postmaster instead of doing a IpcSemaphoreKill on exiting
 *	  the process because the semaphore set is shared among backends and
 *	  we don't want to remove other's semaphores on exit.)
347 348 349 350
 */
bool
ProcRemove(int pid)
{
351 352
	SHMEM_OFFSET location;
	PROC	   *proc;
353 354 355 356 357

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
358
		return FALSE;
359 360 361 362 363 364 365 366 367 368 369
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semKey, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

370
	return TRUE;
371 372 373 374
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
375
 *		this process. Release any of its held spin locks.
376 377 378 379
 */
static void
ProcKill(int exitStatus, int pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory, so we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	/* find our PROC via the shmem PID table.
	 * NOTE(review): `location` is passed to ShmemPIDLookup without being
	 * initialized first; presumably the lookup overwrites it on success —
	 * verify against ShmemPIDLookup's contract.
	 */
	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	Assert(proc == MyProc || pid != MyProcPid);

	/* clear the global first so nothing below re-enters via MyProc */
	MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);

#ifdef USER_LOCKS

	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue (we may have died while blocked on a lock;
	 * keep the wait queue's size counter consistent)
	 * ----------------
	 */
	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		Assert(proc->waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--proc->waitLock->waitProcs.size;
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
436
 *		and  waking them up
437 438 439 440 441 442 443 444
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
445
#ifdef NOT_USED
/*
 * ProcQueueAlloc -- alloc/attach to a named shared memory process queue.
 *
 * Returns a pointer to the (possibly pre-existing) queue, or NULL if the
 * shared memory allocation failed.  The queue is initialized only when
 * this call actually created it.
 */
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		existed;
	PROC_QUEUE *q;

	q = (PROC_QUEUE *) ShmemInitStruct(name,
									   (unsigned) sizeof(PROC_QUEUE),
									   &existed);
	if (q && !existed)
		ProcQueueInit(q);
	return q;
}

#endif
461 462 463 464 465

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
466
ProcQueueInit(PROC_QUEUE *queue)
467
{
468 469
	SHMQueueInit(&(queue->links));
	queue->size = 0;
470 471 472 473 474 475 476 477 478 479 480 481
}



/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * ASSUME: that no one will fiddle with the queue until after
482
 *		we release the spin lock.
483 484 485 486
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
487
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
488
		  SPINLOCK spinlock,
489
		  int token,			/* lockmode */
490
		  int prio,
M
 
Marc G. Fournier 已提交
491
		  LOCK *lock,
492
		  TransactionId xid)	/* needed by user locks, see below */
493
{
494
	int			i;
495
	PROC	   *proc;
B
Bruce Momjian 已提交
496 497 498
	bool		deadlock_checked = false;
	struct itimerval timeval,
				dummy;
499

500
	/*
501 502 503 504 505 506 507 508
	 * If the first entries in the waitQueue have a greater priority than
	 * we have, we must be a reader, and they must be a writers, and we
	 * must be here because the current holder is a writer or a reader but
	 * we don't share shared locks if a writer is waiting. We put
	 * ourselves after the writers.  This way, we have a FIFO, but keep
	 * the readers together to give them decent priority, and no one
	 * starves.  Because we group all readers together, a non-empty queue
	 * only has a few possible configurations:
509
	 *
510 511
	 * [readers] [writers] [readers][writers] [writers][readers]
	 * [writers][readers][writers]
512
	 *
513 514 515 516
	 * In a full queue, we would have a reader holding a lock, then a writer
	 * gets the lock, then a bunch of readers, made up of readers who
	 * could not share the first readlock because a writer was waiting,
	 * and new readers arriving while the writer had the lock.
517 518
	 *
	 */
B
Bruce Momjian 已提交
519
	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);
520 521

	/* If we are a reader, and they are writers, skip past them */
522
	for (i = 0; i < waitQueue->size && proc->prio > prio; i++)
523 524 525
		proc = (PROC *) MAKE_PTR(proc->links.prev);

	/* The rest of the queue is FIFO, with readers first, writers last */
526
	for (; i < waitQueue->size && proc->prio <= prio; i++)
527
		proc = (PROC *) MAKE_PTR(proc->links.prev);
528 529 530 531

	MyProc->prio = prio;
	MyProc->token = token;
	MyProc->waitLock = lock;
532

M
 
Marc G. Fournier 已提交
533 534 535 536 537 538 539 540 541
#ifdef USER_LOCKS
	/* -------------------
	 * Currently, we only need this for the ProcWakeup routines.
	 * This must be 0 for user lock, so we can't just use the value
	 * from GetCurrentTransactionId().
	 * -------------------
	 */
	TransactionIdStore(xid, &MyProc->xid);
#else
542
#ifndef LowLevelLocking
543 544 545 546 547
	/* -------------------
	 * currently, we only need this for the ProcWakeup routines
	 * -------------------
	 */
	TransactionIdStore((TransactionId) GetCurrentTransactionId(), &MyProc->xid);
M
 
Marc G. Fournier 已提交
548
#endif
549
#endif
550 551 552 553 554 555 556

	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
B
Bruce Momjian 已提交
557
	waitQueue->size++;
558 559 560 561

	SpinRelease(spinlock);

	/* --------------
B
Bruce Momjian 已提交
562
	 * We set this so we can wake up periodically and check for a deadlock.
B
Bruce Momjian 已提交
563 564
	 * If a deadlock is detected, the handler releases the processes
	 * semaphore and aborts the current transaction.
B
Bruce Momjian 已提交
565 566 567
	 *
	 * Need to zero out struct to set the interval and the micro seconds fields
	 * to 0.
568 569
	 * --------------
	 */
B
Bruce Momjian 已提交
570 571 572
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = \
		(DeadlockCheckTimer ? DeadlockCheckTimer : DEADLOCK_CHECK_TIMER);
573

B
Bruce Momjian 已提交
574 575
	do
	{
576
		MyProc->errType = NO_ERROR;		/* reset flag after deadlock check */
577

B
Bruce Momjian 已提交
578 579 580 581 582
		if (!deadlock_checked)
			if (setitimer(ITIMER_REAL, &timeval, &dummy))
				elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
		deadlock_checked = true;

B
Bruce Momjian 已提交
583 584 585 586 587 588
		/* --------------
		 * if someone wakes us between SpinRelease and IpcSemaphoreLock,
		 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
		 * the semaphore implementation.
		 * --------------
		 */
M
 
Marc G. Fournier 已提交
589 590
		IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum,
						 IpcExclusiveLock);
591 592 593
	} while (MyProc->errType == STATUS_NOT_FOUND);		/* sleep after deadlock
														 * check */

B
Bruce Momjian 已提交
594 595 596 597 598 599 600 601
	/* ---------------
	 * We were awoken before a timeout - now disable the timer
	 * ---------------
	 */
	timeval.it_value.tv_sec = 0;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to diable timer for process wakeup");

602 603 604 605 606 607 608
	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

M
 
Marc G. Fournier 已提交
609 610
#ifdef LOCK_MGR_DEBUG
	/* Just to get meaningful debug messages from DumpLocks() */
611
	MyProc->waitLock = (LOCK *) NULL;
M
 
Marc G. Fournier 已提交
612 613
#endif

614
	return MyProc->errType;
615 616 617 618 619 620
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
621 622
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
623
 */
624
/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 * Caller must hold the lock table spinlock and is responsible for
 * decrementing waitLock->waitProcs.size.  The proc is unlinked from the
 * wait queue and its errType set before its semaphore is released.
 * Returns the proc that followed it in the wait queue, or NULL if the
 * proc was not actually linked into a queue.
 */
static PROC *
ProcWakeup(PROC *proc, int errType)
{
	PROC	   *nextProc = (PROC *) NULL;

	if (proc->links.prev != INVALID_OFFSET &&
		proc->links.next != INVALID_OFFSET)
	{
		nextProc = (PROC *) MAKE_PTR(proc->links.prev);

		SHMQueueDelete(&(proc->links));
		SHMQueueElemInit(&(proc->links));

		proc->errType = errType;

		IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum,
						   IpcExclusiveLock);
	}

	return nextProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
650
 *		released.
651 652
 */
int
653
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
654
{
655 656
	PROC	   *proc;
	int			count;
M
 
Marc G. Fournier 已提交
657 658 659 660 661
	int			trace_flag;
	int			last_locktype = -1;
	int			queue_size = queue->size;

	Assert(queue->size >= 0);
662 663

	if (!queue->size)
664
		return STATUS_NOT_FOUND;
665 666 667

	proc = (PROC *) MAKE_PTR(queue->links.prev);
	count = 0;
M
 
Marc G. Fournier 已提交
668 669
	while ((queue_size--) && (proc))
	{
670

M
 
Marc G. Fournier 已提交
671
		/*
672 673
		 * This proc will conflict as the previous one did, don't even
		 * try.
M
 
Marc G. Fournier 已提交
674 675 676 677 678 679 680 681
		 */
		if (proc->token == last_locktype)
			continue;

		/*
		 * This proc conflicts with locks held by others, ignored.
		 */
		if (LockResolveConflicts(lockmethod,
682
								 lock,
683
								 proc->token,
M
 
Marc G. Fournier 已提交
684 685 686 687 688 689
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
			last_locktype = proc->token;
			continue;
		}
690 691 692 693 694 695 696

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
697
		GrantLock(lock, proc->token);
698 699 700

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
701
		 * returns the next proc in chain.
702 703 704
		 */

		count++;
M
 
Marc G. Fournier 已提交
705 706
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
707
	}
708

M
 
Marc G. Fournier 已提交
709 710
	Assert(queue->size >= 0);

711
	if (count)
712
		return STATUS_OK;
713 714
	else
	{
715
		/* Something is still blocking us.	May have deadlocked. */
M
 
Marc G. Fournier 已提交
716 717 718 719 720 721 722 723 724
		trace_flag = (lock->tag.lockmethod == USER_LOCKMETHOD) ? \
			TRACE_USERLOCKS : TRACE_LOCKS;
		TPRINTF(trace_flag,
				"ProcLockWakeup: lock(%x) can't wake up any process",
				MAKE_OFFSET(lock));
#ifdef DEADLOCK_DEBUG
		if (pg_options[trace_flag] >= 2)
			DumpAllLocks();
#endif
725
		return STATUS_NOT_FOUND;
M
 
Marc G. Fournier 已提交
726
	}
727 728 729
}

void
730
ProcAddLock(SHM_QUEUE *elem)
731
{
732
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
733 734 735
}

/* --------------------
B
Bruce Momjian 已提交
736 737 738
 * We only get to this routine if we got SIGALRM after DEADLOCK_CHECK_TIMER
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that I'm no longer waiting
739
 * on a lock so that other processes don't try to wake me up and screw
740 741 742
 * up my semaphore.
 * --------------------
 */
743
static void
HandleDeadLock(int sig)
{
	LOCK	   *mywaitlock;

	/* hold the lock table against concurrent wakeups while we decide */
	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * Damn these names are LONG! -mer
	 * ---------------------
	 */
	if (IpcSemaphoreGetCount(MyProc->sem.semId, MyProc->sem.semNum) ==
		IpcSemaphoreDefaultStartValue)
	{
		UnlockLockTable();
		return;
	}

	/*
	 * you would think this would be unnecessary, but...
	 *
	 * this also means we've been removed already.  in some ports (e.g.,
	 * sparc and aix) the semop(2) implementation is such that we can
	 * actually end up in this handler after someone has removed us from
	 * the queue and bopped the semaphore *but the test above fails to
	 * detect the semaphore update* (presumably something weird having to
	 * do with the order in which the semaphore wakeup signal and SIGALRM
	 * get handled).
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

#ifdef DEADLOCK_DEBUG
	DumpAllLocks();
#endif

	/* no real deadlock: flag ProcSleep to loop and sleep again */
	if (!DeadLockCheck(&(MyProc->lockQueue), MyProc->waitLock, true))
	{
		UnlockLockTable();
		MyProc->errType = STATUS_NOT_FOUND;
		return;
	}

	mywaitlock = MyProc->waitLock;

	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
	Assert(mywaitlock->waitProcs.size > 0);
	--mywaitlock->waitProcs.size;
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the count is right for next time.
	 * I was awoken by a signal, not by someone unlocking my semaphore.
	 * ------------------
	 */
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum,
					   IpcExclusiveLock);

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();

	elog(NOTICE, "Deadlock detected -- See the lock(l) manual page for a possible cause.");
	return;
}

void
834
ProcReleaseSpins(PROC *proc)
835
{
836
	int			i;
837 838 839 840 841 842 843

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
844
	{
845
		if (proc->sLocks[i])
846
		{
847 848
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
849 850 851 852 853
		}
	}
}

/*****************************************************************************
854
 *
855 856 857 858
 *****************************************************************************/

/*
 * ProcGetNewSemKeyAndNum -
859 860 861 862
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set. (If the semaphore set doesn't exist yet,
 *	  IpcSemaphoreCreate will create it. Otherwise, we use the existing
 *	  semaphore set.)
863 864
 */
static void
865
ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum)
866
{
867 868
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
869
	int32		fullmask = (1 << (PROC_NSEMS_PER_SET+1)) - 1;
870

871 872 873 874
	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */
875

876 877
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
878 879
		int			mask = 1;
		int			j;
880 881

		if (freeSemMap[i] == fullmask)
882
			continue;			/* this set is fully allocated */
883 884 885 886 887 888 889 890

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * a free semaphore found. Mark it as allocated.
891
				 * Also set the bit indicating whole set is allocated.
892
				 */
893
				freeSemMap[i] |= mask + (1 << PROC_NSEMS_PER_SET);
894 895 896 897 898 899 900

				*key = ProcGlobal->currKey + i;
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
901 902
	}

903
	/* if we reach here, all the semaphores are in use. */
904
	elog(ERROR, "InitProc: cannot allocate a free semaphore");
905 906 907 908
}

/*
 * ProcFreeSem -
909
 *	  free up our semaphore in the semaphore set.
910 911 912 913
 */
/*
 * ProcFreeSem -
 *	  mark our semaphore as free again in the free-semaphore bitmap.
 *
 * The semaphore set itself is deliberately NOT released even when it
 * becomes completely unused; sets are kept around so we cannot run out
 * while starting new backends --- cf. InitProcGlobal.  The
 * PROC_NSEMS_PER_SET+1'st bit of the map entry therefore remains set,
 * which is what ProcFreeAllSemaphores relies on.
 */
static void
ProcFreeSem(IpcSemaphoreKey semKey, int semNum)
{
	int			setno = semKey - ProcGlobal->currKey;

	ProcGlobal->freeSemMap[setno] &= ~(1 << semNum);
}

/*
 * ProcFreeAllSemaphores -
932 933 934
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
935
 */
936
static void
937 938
ProcFreeAllSemaphores()
{
939 940
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
941

942 943 944 945 946
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		if (freeSemMap[i] != 0)
			IpcSemaphoreKill(ProcGlobal->currKey + i);
	}
947
}