proc.c 24.1 KB
Newer Older
1 2
/*-------------------------------------------------------------------------
 *
3
 * proc.c
4
 *	  routines to manage per-process shared memory data structure
5 6 7 8 9
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
10
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.64 1999/11/07 23:08:17 momjian Exp $
11 12 13 14
 *
 *-------------------------------------------------------------------------
 */
/*
15 16
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
17 18 19
 *
 *
 * Interface (a):
20 21 22
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
23 24 25 26 27 28 29 30 31 32
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
33
 *		associated with the process.
34 35
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
36 37 38 39 40 41
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
42 43
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
44 45 46 47
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
48
 *
49
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.64 1999/11/07 23:08:17 momjian Exp $
50 51 52
 */
#include <sys/time.h>
#include <unistd.h>
53
#include <signal.h>
54
#include <sys/types.h>
M
Marc G. Fournier 已提交
55

56
#if defined(solaris_sparc) || defined(__CYGWIN__)
57 58 59 60
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

M
Marc G. Fournier 已提交
61
#include "postgres.h"
62
#include "miscadmin.h"
63
#include "libpq/pqsignal.h"
64 65


66 67
/* In Ultrix, sem.h must be included after ipc.h */
#include <sys/sem.h>
B
Bruce Momjian 已提交
68

B
Bruce Momjian 已提交
69
#include "storage/lmgr.h"
70
#include "storage/proc.h"
M
 
Marc G. Fournier 已提交
71
#include "utils/trace.h"
72

73
/* exported so the signal machinery can install it as the SIGALRM handler */
void HandleDeadLock(SIGNAL_ARGS);
static void ProcFreeAllSemaphores(void);

/* deadlock-check timeout (seconds), overridable via pg_options */
#define DeadlockCheckTimer pg_options[OPT_DEADLOCKTIMEOUT]

/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
SPINLOCK	ProcStructLock;

/* shared-memory header: free PROC list plus semaphore bookkeeping */
static PROC_HDR *ProcGlobal = NULL;

/* this backend's own PROC entry (lives in shared memory) */
PROC	   *MyProc = NULL;

static void ProcKill(int exitStatus, int pid);
static void ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum);
static void ProcFreeSem(IpcSemaphoreKey semKey, int semNum);

static char *DeadLockMessage = "Deadlock detected -- See the lock(l) manual page for a possible cause.";

97 98
/*
 * InitProcGlobal -
 *	  initializes the global process table. We put it here so that
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
 *
 * key:			IPC key for this installation's shared resources.
 * maxBackends:	maximum number of backends the postmaster will start.
 */
void
InitProcGlobal(IPCKey key, int maxBackends)
{
	bool		found = false;

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		/* empty free list; no semaphore sets allocated yet */
		ProcGlobal->freeProcs = INVALID_OFFSET;
		ProcGlobal->currKey = IPCGetProcessSemaphoreInitKey(key);
		for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
			ProcGlobal->freeSemMap[i] = 0;

		/*
		 * Arrange to delete semas on exit --- set this up now so that we
		 * will clean up if pre-allocation fails...
		 */
		on_shmem_exit(ProcFreeAllSemaphores, NULL);

		/*
		 * Pre-create the semaphores for the first maxBackends processes,
		 * unless we are running as a standalone backend.
		 */
		if (key != PrivateIPCKey)
		{
			for (i = 0;
				 i < (maxBackends + PROC_NSEMS_PER_SET - 1) / PROC_NSEMS_PER_SET;
				 i++)
			{
				IPCKey		semKey = ProcGlobal->currKey + i;
				int			semId;

				semId = IpcSemaphoreCreate(semKey,
										   PROC_NSEMS_PER_SET,
										   IPCProtection,
										   IpcSemaphoreDefaultStartValue,
										   0);
				if (semId < 0)
					elog(FATAL, "InitProcGlobal: IpcSemaphoreCreate failed");
				/* mark this sema set allocated */
				ProcGlobal->freeSemMap[i] = (1 << PROC_NSEMS_PER_SET);
			}
		}
	}
}

/* ------------------------
 * InitProc -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 *
 * Grabs a PROC struct off the shared free list (or allocates a new one),
 * assigns this backend a semaphore for sleeping on, and registers the
 * struct in the shmem PID table so the postmaster can clean up after us.
 * Must run after InitProcGlobal(); elog(STOP)s otherwise.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
	bool		found = false;
	unsigned long location,
				myOffset;

	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(STOP, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
		return;
	}

	/* try to get a proc from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		/* recycle a PROC left behind by a dead backend */
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc((unsigned) sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;

		ProcGetNewSemKeyAndNum(&semKey, &semNum);

		/*
		 * Note: because of the pre-allocation done in InitProcGlobal,
		 * this call should always attach to an existing semaphore. It
		 * will (try to) create a new group of semaphores only if the
		 * postmaster tries to start more backends than it said it would.
		 */
		semId = IpcSemaphoreCreate(semKey,
								   PROC_NSEMS_PER_SET,
								   IPCProtection,
								   IpcSemaphoreDefaultStartValue,
								   0);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		/* leave the semaphore locked so our first P() in ProcSleep blocks */
		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
		MyProc->sem.semId = -1;

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = MyProcPid;
	MyProc->databaseId = MyDatabaseId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the shmem index table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
		elog(STOP, "InitProc: ShmemPID table broken");

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	/* make sure ProcKill runs to tidy shared state when we exit */
	on_shmem_exit(ProcKill, (caddr_t) MyProcPid);
}

/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
320 321 322
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
323 324 325 326
}

/*
 * ProcRemove -
327 328 329 330 331
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process. (We have to do
 *	  this is the postmaster instead of doing a IpcSemaphoreKill on exiting
 *	  the process because the semaphore set is shared among backends and
 *	  we don't want to remove other's semaphores on exit.)
332 333 334 335
 */
bool
ProcRemove(int pid)
{
336 337
	SHMEM_OFFSET location;
	PROC	   *proc;
338 339 340 341 342

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
343
		return FALSE;
344 345 346 347 348 349 350 351 352 353 354
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semKey, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

355
	return TRUE;
356 357 358 359
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held spin locks.
 *
 * Registered via on_shmem_exit() in InitProcess(); exitStatus/pid come
 * from the shmem-exit callback convention.
 */
static void
ProcKill(int exitStatus, int pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	/*
	 * NOTE(review): location is passed to ShmemPIDLookup uninitialized;
	 * this relies on ShmemPIDLookup always storing a result — verify
	 * against shmem.c.
	 */
	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	Assert(proc == MyProc || pid != MyProcPid);

	/* clear MyProc first so error recovery below can't recurse on it */
	MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);

#ifdef USER_LOCKS

	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		Assert(proc->waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--proc->waitLock->waitProcs.size;
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
 *		and  waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns a pointer to the named queue, or NULL if shared memory is
 * exhausted.  The queue is initialized only when freshly allocated.
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		existed;
	PROC_QUEUE *pq;

	pq = (PROC_QUEUE *) ShmemInitStruct(name,
										(unsigned) sizeof(PROC_QUEUE),
										&existed);
	if (pq == NULL)
		return NULL;
	if (!existed)
		ProcQueueInit(pq);
	return pq;
}

#endif
446 447 448 449 450

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
451
ProcQueueInit(PROC_QUEUE *queue)
452
{
453 454
	SHMQueueInit(&(queue->links));
	queue->size = 0;
455 456 457 458 459 460 461 462 463 464 465 466
}



/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * ASSUME: that no one will fiddle with the queue until after
467
 *		we release the spin lock.
468 469 470 471
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
472
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
473
		  LOCKMETHODCTL *lockctl,
474
		  int token,			/* lockmode */
V
Vadim B. Mikheev 已提交
475
		  LOCK *lock)
476
{
477
	int			i;
V
Vadim B. Mikheev 已提交
478
	SPINLOCK	spinlock = lockctl->masterLock;
479
	PROC	   *proc;
V
Vadim B. Mikheev 已提交
480 481 482 483 484
	int			myMask = (1 << token);
	int			waitMask = lock->waitMask;
	int			aheadHolders[MAX_LOCKMODES];
	bool		selfConflict = (lockctl->conflictTab[token] & myMask),
				prevSame = false;
B
Bruce Momjian 已提交
485 486 487
	bool		deadlock_checked = false;
	struct itimerval timeval,
				dummy;
488

V
Vadim B. Mikheev 已提交
489 490 491
	MyProc->token = token;
	MyProc->waitLock = lock;

B
Bruce Momjian 已提交
492
	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);
493

V
Vadim B. Mikheev 已提交
494 495 496
	/* if we don't conflict with any waiter - be first in queue */
	if (!(lockctl->conflictTab[token] & waitMask))
		goto ins;
497

V
Vadim B. Mikheev 已提交
498 499 500
	for (i = 1; i < MAX_LOCKMODES; i++)
		aheadHolders[i] = lock->activeHolders[i];
	(aheadHolders[token])++;
501

V
Vadim B. Mikheev 已提交
502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521
	for (i = 0; i < waitQueue->size; i++)
	{
		/* am I waiting for him ? */
		if (lockctl->conflictTab[token] & proc->holdLock)
		{
			/* is he waiting for me ? */
			if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			{
				MyProc->errType = STATUS_ERROR;
				elog(NOTICE, DeadLockMessage);
				goto rt;
			}
			/* being waiting for him - go past */
		}
		/* if he waits for me */
		else if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			break;
		/* if conflicting locks requested */
		else if (lockctl->conflictTab[proc->token] & myMask)
		{
B
Bruce Momjian 已提交
522

V
Vadim B. Mikheev 已提交
523
			/*
B
Bruce Momjian 已提交
524 525
			 * If I request non self-conflicting lock and there are others
			 * requesting the same lock just before me - stay here.
V
Vadim B. Mikheev 已提交
526 527 528 529
			 */
			if (!selfConflict && prevSame)
				break;
		}
B
Bruce Momjian 已提交
530

V
Vadim B. Mikheev 已提交
531
		/*
B
Bruce Momjian 已提交
532 533
		 * Last attempt to don't move any more: if we don't conflict with
		 * rest waiters in queue.
V
Vadim B. Mikheev 已提交
534 535 536
		 */
		else if (!(lockctl->conflictTab[token] & waitMask))
			break;
537

V
Vadim B. Mikheev 已提交
538 539 540
		prevSame = (proc->token == token);
		(aheadHolders[proc->token])++;
		if (aheadHolders[proc->token] == lock->holders[proc->token])
B
Bruce Momjian 已提交
541
			waitMask &= ~(1 << proc->token);
V
Vadim B. Mikheev 已提交
542 543
		proc = (PROC *) MAKE_PTR(proc->links.prev);
	}
544

V
Vadim B. Mikheev 已提交
545
ins:;
546 547 548 549 550 551
	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
B
Bruce Momjian 已提交
552
	waitQueue->size++;
553

V
Vadim B. Mikheev 已提交
554
	lock->waitMask |= myMask;
555 556 557
	SpinRelease(spinlock);

	/* --------------
B
Bruce Momjian 已提交
558
	 * We set this so we can wake up periodically and check for a deadlock.
B
Bruce Momjian 已提交
559 560
	 * If a deadlock is detected, the handler releases the processes
	 * semaphore and aborts the current transaction.
B
Bruce Momjian 已提交
561 562 563
	 *
	 * Need to zero out struct to set the interval and the micro seconds fields
	 * to 0.
564 565
	 * --------------
	 */
B
Bruce Momjian 已提交
566 567 568
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = \
		(DeadlockCheckTimer ? DeadlockCheckTimer : DEADLOCK_CHECK_TIMER);
569

B
Bruce Momjian 已提交
570 571
	do
	{
572
		MyProc->errType = NO_ERROR;		/* reset flag after deadlock check */
573

B
Bruce Momjian 已提交
574 575 576 577 578
		if (!deadlock_checked)
			if (setitimer(ITIMER_REAL, &timeval, &dummy))
				elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
		deadlock_checked = true;

B
Bruce Momjian 已提交
579 580 581 582 583 584
		/* --------------
		 * if someone wakes us between SpinRelease and IpcSemaphoreLock,
		 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
		 * the semaphore implementation.
		 * --------------
		 */
M
 
Marc G. Fournier 已提交
585 586
		IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum,
						 IpcExclusiveLock);
587 588 589
	} while (MyProc->errType == STATUS_NOT_FOUND);		/* sleep after deadlock
														 * check */

B
Bruce Momjian 已提交
590 591 592 593 594 595 596 597
	/* ---------------
	 * We were awoken before a timeout - now disable the timer
	 * ---------------
	 */
	timeval.it_value.tv_sec = 0;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to diable timer for process wakeup");

598 599 600 601 602 603 604
	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

V
Vadim B. Mikheev 已提交
605 606
rt:;

M
 
Marc G. Fournier 已提交
607 608
#ifdef LOCK_MGR_DEBUG
	/* Just to get meaningful debug messages from DumpLocks() */
609
	MyProc->waitLock = (LOCK *) NULL;
M
 
Marc G. Fournier 已提交
610 611
#endif

612
	return MyProc->errType;
613 614 615 616 617 618
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
619 620
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
621
 */
B
Bruce Momjian 已提交
622
PROC *
623
ProcWakeup(PROC *proc, int errType)
624
{
625
	PROC	   *retProc;
626 627 628 629 630

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
631
		return (PROC *) NULL;
632 633 634 635 636 637 638 639 640 641 642 643

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum, IpcExclusiveLock);

	return retProc;
644 645 646 647
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
648
 *		released.
649 650
 */
int
651
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
652
{
653
	PROC	   *proc;
V
Vadim B. Mikheev 已提交
654
	int			count = 0;
M
 
Marc G. Fournier 已提交
655
	int			trace_flag;
V
Vadim B. Mikheev 已提交
656
	int			last_locktype = 0;
M
 
Marc G. Fournier 已提交
657 658 659
	int			queue_size = queue->size;

	Assert(queue->size >= 0);
660 661

	if (!queue->size)
662
		return STATUS_NOT_FOUND;
663 664

	proc = (PROC *) MAKE_PTR(queue->links.prev);
M
 
Marc G. Fournier 已提交
665 666
	while ((queue_size--) && (proc))
	{
667

M
 
Marc G. Fournier 已提交
668
		/*
669 670
		 * This proc will conflict as the previous one did, don't even
		 * try.
M
 
Marc G. Fournier 已提交
671 672 673 674 675
		 */
		if (proc->token == last_locktype)
			continue;

		/*
V
Vadim B. Mikheev 已提交
676
		 * Does this proc conflict with locks held by others ?
M
 
Marc G. Fournier 已提交
677 678
		 */
		if (LockResolveConflicts(lockmethod,
679
								 lock,
680
								 proc->token,
M
 
Marc G. Fournier 已提交
681 682 683
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
V
Vadim B. Mikheev 已提交
684 685
			if (count != 0)
				break;
M
 
Marc G. Fournier 已提交
686 687 688
			last_locktype = proc->token;
			continue;
		}
689 690 691 692 693 694 695

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
696
		GrantLock(lock, proc->token);
697 698 699

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
700
		 * returns the next proc in chain.
701 702 703
		 */

		count++;
M
 
Marc G. Fournier 已提交
704 705
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
706
	}
707

M
 
Marc G. Fournier 已提交
708 709
	Assert(queue->size >= 0);

710
	if (count)
711
		return STATUS_OK;
712 713
	else
	{
714
		/* Something is still blocking us.	May have deadlocked. */
M
 
Marc G. Fournier 已提交
715 716 717 718 719 720 721 722 723
		trace_flag = (lock->tag.lockmethod == USER_LOCKMETHOD) ? \
			TRACE_USERLOCKS : TRACE_LOCKS;
		TPRINTF(trace_flag,
				"ProcLockWakeup: lock(%x) can't wake up any process",
				MAKE_OFFSET(lock));
#ifdef DEADLOCK_DEBUG
		if (pg_options[trace_flag] >= 2)
			DumpAllLocks();
#endif
724
		return STATUS_NOT_FOUND;
M
 
Marc G. Fournier 已提交
725
	}
726 727 728
}

/*
 * ProcAddLock -- link a lock-queue element onto this backend's private
 *		lockQueue, so that exit-time cleanup (ProcKill/ProcReleaseLocks
 *		via LockReleaseAll) can find everything this process holds.
 */
void
ProcAddLock(SHM_QUEUE *elem)
{
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
}

/* --------------------
 * SIGALRM handler, armed by ProcSleep().
 *
 * We only get to this routine if we got SIGALRM after DEADLOCK_CHECK_TIMER
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that I'm no longer waiting
 * on a lock so that other processes don't try to wake me up and screw
 * up my semaphore.
 * --------------------
 */
void
HandleDeadLock(SIGNAL_ARGS)
{
	LOCK	   *mywaitlock;

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * Damn these names are LONG! -mer
	 * ---------------------
	 */
	if (IpcSemaphoreGetCount(MyProc->sem.semId, MyProc->sem.semNum) ==
		IpcSemaphoreDefaultStartValue)
	{
		UnlockLockTable();
		return;
	}

	/*
	 * you would think this would be unnecessary, but...
	 *
	 * this also means we've been removed already.  in some ports (e.g.,
	 * sparc and aix) the semop(2) implementation is such that we can
	 * actually end up in this handler after someone has removed us from
	 * the queue and bopped the semaphore *but the test above fails to
	 * detect the semaphore update* (presumably something weird having to
	 * do with the order in which the semaphore wakeup signal and SIGALRM
	 * get handled).
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

#ifdef DEADLOCK_DEBUG
	DumpAllLocks();
#endif

	/* flag tells ProcSleep to loop back to sleep if there's no deadlock */
	MyProc->errType = STATUS_NOT_FOUND;
	if (!DeadLockCheck(MyProc, MyProc->waitLock))
	{
		UnlockLockTable();
		return;
	}

	mywaitlock = MyProc->waitLock;

	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
	Assert(mywaitlock->waitProcs.size > 0);
	--mywaitlock->waitProcs.size;
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the count is right for next time.
	 * I was awoken by a signal, not by someone unlocking my semaphore.
	 * ------------------
	 */
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum,
					   IpcExclusiveLock);

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();

	elog(NOTICE, DeadLockMessage);
	return;
}

void
833
ProcReleaseSpins(PROC *proc)
834
{
835
	int			i;
836 837 838 839 840 841 842

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
843
	{
844
		if (proc->sLocks[i])
845
		{
846 847
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
848 849 850 851 852
		}
	}
}

/*****************************************************************************
853
 *
854 855 856 857
 *****************************************************************************/

/*
 * ProcGetNewSemKeyAndNum -
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set. (If the semaphore set doesn't exist yet,
 *	  IpcSemaphoreCreate will create it. Otherwise, we use the existing
 *	  semaphore set.)
 *
 * On success, *key gets the set's IPC key and *semNum the index within
 * the set; elog(ERROR)s if every semaphore is in use.
 *
 * Bitmap layout: bits 0..PROC_NSEMS_PER_SET-1 track individual
 * semaphores; bit PROC_NSEMS_PER_SET marks the whole set as allocated
 * (see ProcFreeAllSemaphores).
 */
static void
ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum)
{
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
	/* all semaphore bits plus the set-allocated bit */
	int32		fullmask = (1 << (PROC_NSEMS_PER_SET + 1)) - 1;

	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */

	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		int			mask = 1;
		int			j;

		if (freeSemMap[i] == fullmask)
			continue;			/* this set is fully allocated */

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * a free semaphore found. Mark it as allocated. Also set
				 * the bit indicating whole set is allocated.
				 */
				freeSemMap[i] |= mask + (1 << PROC_NSEMS_PER_SET);

				*key = ProcGlobal->currKey + i;
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
	}

	/* if we reach here, all the semaphores are in use. */
	elog(ERROR, "InitProc: cannot allocate a free semaphore");
}

/*
 * ProcFreeSem -
 *	  free up our semaphore in the semaphore set, making it available
 *	  for reuse by a future backend.
 */
static void
ProcFreeSem(IpcSemaphoreKey semKey, int semNum)
{
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
	int			setIdx = semKey - ProcGlobal->currKey;

	/* clear only this semaphore's bit in the allocation bitmap */
	freeSemMap[setIdx] &= ~(1 << semNum);

	/*
	 * Formerly we'd release a semaphore set if it was now completely
	 * unused, but now we keep the semaphores to ensure we won't run out
	 * when starting new backends --- cf. InitProcGlobal.  Note that the
	 * PROC_NSEMS_PER_SET+1'st bit of the freeSemMap entry remains set to
	 * indicate it is still allocated; ProcFreeAllSemaphores() needs that.
	 */
}

/*
 * ProcFreeAllSemaphores -
932 933 934
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
935
 */
936
static void
937 938
ProcFreeAllSemaphores()
{
939 940
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
941

942 943 944 945 946
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		if (freeSemMap[i] != 0)
			IpcSemaphoreKill(ProcGlobal->currKey + i);
	}
947
}