/*-------------------------------------------------------------------------
 *
 * proc.c--
 *	  routines to manage per-process shared memory data structure
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.31 1998/02/19 15:04:45 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
 *
 *
 * Interface (a):
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.31 1998/02/19 15:04:45 momjian Exp $
 */
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <sys/types.h>

#if defined(sparc_solaris)
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#include "postgres.h"
#include "miscadmin.h"
#include "libpq/pqsignal.h"

#include "access/xact.h"
#include "utils/hsearch.h"

#include "storage/ipc.h"
/* In Ultrix, sem.h must be included after ipc.h */
#include <sys/sem.h>
#include "storage/buf.h"
#include "storage/lock.h"
#include "storage/lmgr.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "storage/proc.h"

static void HandleDeadLock(int sig);
static PROC *ProcWakeup(PROC *proc, int errType);

/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
SPINLOCK	ProcStructLock;

/*
 * For cleanup routines.  Don't cleanup if the initialization
 * has not happened.
 */
static bool ProcInitialized = FALSE;

static PROC_HDR *ProcGlobal = NULL;

PROC	   *MyProc = NULL;

static void ProcKill(int exitStatus, int pid);
static void ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum);
static void ProcFreeSem(IpcSemaphoreKey semKey, int semNum);

/*
 * InitProcGlobal -
 *	  initializes the global process table. We put it here so that
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
 */
void
InitProcGlobal(IPCKey key)
{
	bool		found = false;

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		ProcGlobal->freeProcs = INVALID_OFFSET;
		ProcGlobal->currKey = IPCGetProcessSemaphoreInitKey(key);
		for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
			ProcGlobal->freeSemMap[i] = 0;
	}
}

/* ------------------------
 * InitProcess -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
	bool		found = false;
	int			semstat;
	unsigned long location,
				myOffset;

	/* ------------------
	 * Routine called if deadlock timer goes off. See ProcSleep()
	 * ------------------
	 */
	pqsignal(SIGALRM, HandleDeadLock);

	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(ERROR, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
		return;
	}

	/* try to get a proc from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal binding table
		 * mechanism because the proc structure is stored by PID instead
		 * of by a global name (need to look it up by PID when we cleanup
		 * dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc((unsigned) sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;

		ProcGetNewSemKeyAndNum(&semKey, &semNum);

		semId = IpcSemaphoreCreate(semKey,
								   PROC_NSEMS_PER_SET,
								   IPCProtection,
								   IpcSemaphoreDefaultStartValue,
								   0,
								   &semstat);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
	{
		MyProc->sem.semId = -1;
	}

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = 0;
#if 0
	MyProc->pid = MyProcPid;
#endif
	MyProc->xid = InvalidTransactionId;

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the binding table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
	{
		elog(FATAL, "InitProc: ShmemPID table broken");
	}

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

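	/*
	 * Register ProcKill to run at backend exit so that this process's
	 * shared memory state is cleaned up when it goes away.
	 */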
	on_exitpg(ProcKill, (caddr_t) MyProcPid);

	ProcInitialized = TRUE;
}

/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
}

/*
 * ProcRemove -
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process. (We have to do
 *	  this in the postmaster instead of doing an IpcSemaphoreKill on exiting
 *	  the process because the semaphore set is shared among backends and
 *	  we don't want to remove other's semaphores on exit.)
 */
bool
ProcRemove(int pid)
{
	SHMEM_OFFSET location;
	PROC	   *proc;

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
		return (FALSE);
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semKey, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

	return (TRUE);
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held spin locks.
 */
static void
ProcKill(int exitStatus, int pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	if (proc != MyProc)
	{
		Assert(pid != MyProcPid);
	}
	else
		MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(1, &proc->lockQueue);

#ifdef USER_LOCKS
	LockReleaseAll(0, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		Assert(proc->waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--proc->waitLock->waitProcs.size;
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
 *		and  waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
	ShmemInitStruct(name, (unsigned) sizeof(PROC_QUEUE), &found);

	if (!queue)
	{
		return (NULL);
	}
	if (!found)
	{
		ProcQueueInit(queue);
	}
	return (queue);
}

#endif

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}



/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * ASSUME: that no one will fiddle with the queue until after
 *		we release the spin lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
ProcSleep(PROC_QUEUE *waitQueue,
		  SPINLOCK spinlock,
		  int token,
		  int prio,
		  LOCK *lock)
{
	int			i = 0;
	PROC	   *proc;
	struct itimerval timeval,
				dummy;

	/*
	 *	If the first entries in the waitQueue have a greater priority than
 *	we have, we must be a reader, and they must be writers, and we
 *	must be here because the current holder is a writer, or is a
 *	reader whose lock we can't share because a writer is waiting.
	 *	We put ourselves after the writers.  This way, we have a FIFO, but
	 *	keep the readers together to give them decent priority, and no one
	 *	starves.  Because we group all readers together, a non-empty queue
	 *	only has a few possible configurations:
	 *
	 *	[readers]
	 *	[writers]
	 *	[readers][writers]
	 *	[writers][readers]
	 *	[writers][readers][writers]
	 *
	 *	In a full queue, we would have a reader holding a lock, then a
	 *	writer gets the lock, then a bunch of readers, made up of readers
	 *	who could not share the first readlock because a writer was waiting,
	 *	and new readers arriving while the writer had the lock.
	 *
	 */
	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);

	/* If we are a reader, and they are writers, skip past them */
	while (i++ < waitQueue->size && proc->prio > prio)
		proc = (PROC *) MAKE_PTR(proc->links.prev);

	/* The rest of the queue is FIFO, with readers first, writers last */
	while (i++ < waitQueue->size && proc->prio <= prio)
		proc = (PROC *) MAKE_PTR(proc->links.prev);

	MyProc->prio = prio;
	MyProc->token = token;
	MyProc->waitLock = lock;
	
	/* -------------------
	 * currently, we only need this for the ProcWakeup routines
	 * -------------------
	 */
	TransactionIdStore((TransactionId) GetCurrentTransactionId(), &MyProc->xid);

	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
	waitQueue->size++;

	SpinRelease(spinlock);

	/* --------------
	 * We set this so we can wake up periodically and check for a deadlock.
	 * If a deadlock is detected, the handler releases the process's
	 * semaphore and aborts the current transaction.
	 *
	 * Need to zero out the struct so that the interval and microseconds
	 * fields are 0.
	 * --------------
	 */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = DEADLOCK_CHECK_TIMER;

	do
	{
		MyProc->errType = NO_ERROR; /* reset flag after deadlock check */

		if (setitimer(ITIMER_REAL, &timeval, &dummy))
			elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");

		/* --------------
		 * if someone wakes us between SpinRelease and IpcSemaphoreLock,
		 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
		 * the semaphore implementation.
		 * --------------
		 */
		IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum, IpcExclusiveLock);
	} while (MyProc->errType == STATUS_NOT_FOUND); /* sleep after deadlock check */
	
	/* ---------------
	 * We were awoken before a timeout - now disable the timer
	 * ---------------
	 */
	timeval.it_value.tv_sec = 0;


	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");

	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

	return (MyProc->errType);
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
 */
static PROC *
ProcWakeup(PROC *proc, int errType)
{
	PROC	   *retProc;

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
		return ((PROC *) NULL);

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum, IpcExclusiveLock);

	return retProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
 *		released.
 */
int
ProcLockWakeup(PROC_QUEUE *queue, char *ltable, char *lock)
{
	PROC	   *proc;
	int			count;

	if (!queue->size)
		return (STATUS_NOT_FOUND);

	proc = (PROC *) MAKE_PTR(queue->links.prev);
	count = 0;
	while ((LockResolveConflicts((LOCKTAB *) ltable,
								 (LOCK *) lock,
								 proc->token,
								 proc->xid) == STATUS_OK))
	{

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
		GrantLock((LOCK *) lock, proc->token);
		queue->size--;

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
		 * returns the next proc in chain.
		 */
		proc = ProcWakeup(proc, NO_ERROR);

		count++;
		if (!proc || queue->size == 0)
			break;
	}

	if (count)
		return (STATUS_OK);
	else
		/* Something is still blocking us.	May have deadlocked. */
		return (STATUS_NOT_FOUND);
}

void
ProcAddLock(SHM_QUEUE *elem)
{
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
}

/* --------------------
 * We only get to this routine if we got SIGALRM after DEADLOCK_CHECK_TIMER
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that we're no longer waiting
 * on a lock so that other processes don't try to wake us up and screw
 * up our semaphore.
 * --------------------
 */
static void
HandleDeadLock(int sig)
{
	LOCK	   *mywaitlock;

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * Damn these names are LONG! -mer
	 * ---------------------
	 */
	if (IpcSemaphoreGetCount(MyProc->sem.semId, MyProc->sem.semNum) ==
		IpcSemaphoreDefaultStartValue)
	{
		UnlockLockTable();
		return;
	}

	/*
	 * you would think this would be unnecessary, but...
	 *
	 * this also means we've been removed already.  in some ports (e.g.,
	 * sparc and aix) the semop(2) implementation is such that we can
	 * actually end up in this handler after someone has removed us from
	 * the queue and bopped the semaphore *but the test above fails to
	 * detect the semaphore update* (presumably something weird having to
	 * do with the order in which the semaphore wakeup signal and SIGALRM
	 * get handled).
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

#ifdef DEADLOCK_DEBUG
	DumpLocks();
#endif

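	/*
	 * If no deadlock is found, just tell ProcSleep to go back to waiting
	 * for the lock; otherwise fall through and abort the wait.
	 */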
	if (!DeadLockCheck(&(MyProc->lockQueue), MyProc->waitLock, true))
	{
		UnlockLockTable();
		MyProc->errType = STATUS_NOT_FOUND;
		return;
	}

	mywaitlock = MyProc->waitLock;

	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
	Assert(mywaitlock->waitProcs.size > 0);
	--mywaitlock->waitProcs.size;
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the count is right for next time.
	 * I was awoken by a signal, not by someone unlocking my semaphore.
	 * ------------------
	 */
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum, IpcExclusiveLock);

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();

	elog(NOTICE, "Deadlock detected -- See the lock(l) manual page for a possible cause.");
	return;
}

void
ProcReleaseSpins(PROC *proc)
{
	int			i;

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
	{
		if (proc->sLocks[i])
		{
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
		}
	}
}

/*****************************************************************************
 *
 *****************************************************************************/

/*
 * ProcGetNewSemKeyAndNum -
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set. (If the semaphore set doesn't exist yet,
 *	  IpcSemaphoreCreate will create it. Otherwise, we use the existing
 *	  semaphore set.)
 */
static void
ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum)
{
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
	unsigned int fullmask;

	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */
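	/*
	 * fullmask has the low PROC_NSEMS_PER_SET bits set: the value a
	 * freeSemMap entry holds when every semaphore in its set is allocated.
	 */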
	fullmask = ~0u >> (32 - PROC_NSEMS_PER_SET);
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		int			mask = 1;
		int			j;

		if (freeSemMap[i] == fullmask)
			continue;			/* none free for this set */

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * a free semaphore found. Mark it as allocated.
				 */
				freeSemMap[i] |= mask;

				*key = ProcGlobal->currKey + i;
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
	}

	/* if we reach here, all the semaphores are in use. */
	elog(ERROR, "InitProc: cannot allocate a free semaphore");
}

/*
 * ProcFreeSem -
 *	  free up our semaphore in the semaphore set. If we're the last one
 *	  in the set, also remove the semaphore set.
 */
static void
ProcFreeSem(IpcSemaphoreKey semKey, int semNum)
{
	int			mask;
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;

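	/* locate our set's map word and clear the bit for our semaphore */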
	i = semKey - ProcGlobal->currKey;
	mask = ~(1 << semNum);
	freeSemMap[i] &= mask;

	if (freeSemMap[i] == 0)
		IpcSemaphoreKill(semKey);
}

/*
 * ProcFreeAllSemaphores -
 *	  on exiting the postmaster, we free up all the semaphores allocated
 *	  to the lmgrs of the backends.
 */
void
ProcFreeAllSemaphores()
{
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;

	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		if (freeSemMap[i] != 0)
			IpcSemaphoreKill(ProcGlobal->currKey + i);
	}
}