/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.80 2000/10/02 19:42:48 petere Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
 *
 *
 * Interface (a):
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
 *		of a shared memory lock chain.  The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.80 2000/10/02 19:42:48 petere Exp $
 */
52 53
#include "postgres.h"

54 55
#include <sys/time.h>
#include <unistd.h>
56
#include <signal.h>
57
#include <sys/types.h>
M
Marc G. Fournier 已提交
58

59
#if defined(solaris_sparc) || defined(__CYGWIN__)
60 61 62 63 64
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#include "miscadmin.h"
65 66


67
/* In Ultrix and QNX, sem.h must be included after ipc.h */
68
#include <sys/sem.h>
B
Bruce Momjian 已提交
69

70 71
#include "storage/proc.h"

72
void		HandleDeadLock(SIGNAL_ARGS);
73
static void ProcFreeAllSemaphores(void);
74
static bool GetOffWaitqueue(PROC *);
75

76
int DeadlockTimeout = 1000;
M
 
Marc G. Fournier 已提交
77

78 79 80 81 82 83 84
/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
85
SPINLOCK	ProcStructLock;
86 87 88

static PROC_HDR *ProcGlobal = NULL;

89
PROC	   *MyProc = NULL;
90

91
static void ProcKill(int exitStatus, int pid);
92
static void ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum);
93
static void ProcFreeSem(IpcSemaphoreKey semKey, int semNum);
94

V
Vadim B. Mikheev 已提交
95 96
static char *DeadLockMessage = "Deadlock detected -- See the lock(l) manual page for a possible cause.";

97 98
/*
 * InitProcGlobal -
99
 *	  initializes the global process table. We put it here so that
100
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
101 102 103
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
104 105 106 107 108 109 110 111 112 113 114
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
115 116
 */
void
117
InitProcGlobal(IPCKey key, int maxBackends)
118
{
119
	bool		found = false;
120

121 122
	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
123
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
124

125 126
	/* --------------------
	 * We're the first - initialize.
127 128
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
129 130 131
	 * --------------------
	 */
	if (!found)
132
	{
133
		int			i;
134

135 136 137 138
		ProcGlobal->freeProcs = INVALID_OFFSET;
		ProcGlobal->currKey = IPCGetProcessSemaphoreInitKey(key);
		for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
			ProcGlobal->freeSemMap[i] = 0;
139

B
Bruce Momjian 已提交
140 141 142
		/*
		 * Arrange to delete semas on exit --- set this up now so that we
		 * will clean up if pre-allocation fails...
143
		 */
144
		on_shmem_exit(ProcFreeAllSemaphores, 0);
145

B
Bruce Momjian 已提交
146 147
		/*
		 * Pre-create the semaphores for the first maxBackends processes,
148 149 150
		 * unless we are running as a standalone backend.
		 */
		if (key != PrivateIPCKey)
151
		{
152
			for (i = 0;
B
Bruce Momjian 已提交
153
				 i < (maxBackends + PROC_NSEMS_PER_SET - 1) / PROC_NSEMS_PER_SET;
154 155 156 157 158 159 160 161 162
				 i++)
			{
				IPCKey		semKey = ProcGlobal->currKey + i;
				int			semId;

				semId = IpcSemaphoreCreate(semKey,
										   PROC_NSEMS_PER_SET,
										   IPCProtection,
										   IpcSemaphoreDefaultStartValue,
163 164 165
										   0);
				if (semId < 0)
					elog(FATAL, "InitProcGlobal: IpcSemaphoreCreate failed");
166 167 168
				/* mark this sema set allocated */
				ProcGlobal->freeSemMap[i] = (1 << PROC_NSEMS_PER_SET);
			}
169
		}
170 171 172 173 174 175 176 177 178 179 180
	}
}

/* ------------------------
 * InitProc -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
181 182 183
	bool		found = false;
	unsigned long location,
				myOffset;
184 185 186 187 188

	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
189
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
190
	if (!found)
191
	{
192
		/* this should not happen. InitProcGlobal() is called before this. */
193
		elog(STOP, "InitProcess: Proc Header uninitialized");
194
	}
195 196

	if (MyProc != NULL)
197
	{
198
		SpinRelease(ProcStructLock);
199
		elog(ERROR, "ProcInit: you already exist");
200
		return;
201
	}
202 203 204 205 206 207

	/* try to get a proc from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
208
	{
209 210 211 212 213 214 215
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
216 217 218 219
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
220 221
		 */

222
		MyProc = (PROC *) ShmemAlloc(sizeof(PROC));
223
		if (!MyProc)
224
		{
225 226
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
227
		}
228 229 230

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
231
	}
232

233
	/*
234 235 236
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
237
	 */
B
Bruce Momjian 已提交
238
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
239 240 241 242 243
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
244 245 246 247
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;
248 249 250

		ProcGetNewSemKeyAndNum(&semKey, &semNum);

B
Bruce Momjian 已提交
251 252 253 254 255
		/*
		 * Note: because of the pre-allocation done in InitProcGlobal,
		 * this call should always attach to an existing semaphore. It
		 * will (try to) create a new group of semaphores only if the
		 * postmaster tries to start more backends than it said it would.
256
		 */
257 258 259 260
		semId = IpcSemaphoreCreate(semKey,
								   PROC_NSEMS_PER_SET,
								   IPCProtection,
								   IpcSemaphoreDefaultStartValue,
261
								   0);
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
		MyProc->sem.semId = -1;

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

B
Bruce Momjian 已提交
284
	MyProc->pid = MyProcPid;
285
	MyProc->databaseId = MyDatabaseId;
286
	MyProc->xid = InvalidTransactionId;
287
	MyProc->xmin = InvalidTransactionId;
288 289 290 291 292 293

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
B
Bruce Momjian 已提交
294
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));
295 296

	/* -------------------------
297
	 * Install ourselves in the shmem index table.	The name to
298 299 300 301 302 303
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
B
Bruce Momjian 已提交
304
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
305
		elog(STOP, "InitProc: ShmemPID table broken");
306 307 308 309

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

310
	on_shmem_exit(ProcKill, (Datum) MyProcPid);
311 312
}

H
Hiroshi Inoue 已提交
313 314 315 316
/* -----------------------
 * get off the wait queue
 * -----------------------
 */
317
static bool
H
Hiroshi Inoue 已提交
318 319
GetOffWaitqueue(PROC *proc)
{
320 321
	bool		getoffed = false;

H
Hiroshi Inoue 已提交
322 323 324
	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
325
		int			lockmode = proc->token;
326
		LOCK	*waitLock = proc->waitLock;
327

328 329
		Assert(waitLock);
		Assert(waitLock->waitProcs.size > 0);
H
Hiroshi Inoue 已提交
330
		SHMQueueDelete(&(proc->links));
331 332 333 334 335 336 337 338 339
		--waitLock->waitProcs.size;
		Assert(waitLock->nHolding > 0);
		Assert(waitLock->nHolding > proc->waitLock->nActive);
		--waitLock->nHolding;
		Assert(waitLock->holders[lockmode] > 0);
		--waitLock->holders[lockmode];
		if (waitLock->activeHolders[lockmode] == waitLock->holders[lockmode])
			waitLock->waitMask &= ~(1 << lockmode);
		ProcLockWakeup(&(waitLock->waitProcs), LOCK_LOCKMETHOD(*waitLock), waitLock);
340
		getoffed = true;
H
Hiroshi Inoue 已提交
341 342 343 344
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

345
	return getoffed;
H
Hiroshi Inoue 已提交
346
}
347

348 349 350 351 352 353 354
/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
355 356 357
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
H
Hiroshi Inoue 已提交
358
	GetOffWaitqueue(MyProc);
359 360 361 362
}

/*
 * ProcRemove -
363 364 365 366 367
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process. (We have to do
 *	  this is the postmaster instead of doing a IpcSemaphoreKill on exiting
 *	  the process because the semaphore set is shared among backends and
 *	  we don't want to remove other's semaphores on exit.)
368 369 370 371
 */
bool
ProcRemove(int pid)
{
372 373
	SHMEM_OFFSET location;
	PROC	   *proc;
374 375 376 377 378

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
379
		return FALSE;
380 381 382 383 384 385 386 387 388 389 390
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semKey, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

391
	return TRUE;
392 393 394 395
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
396
 *		this process. Release any of its held spin locks.
397 398 399 400
 */
static void
ProcKill(int exitStatus, int pid)
{
401 402
	PROC	   *proc;
	SHMEM_OFFSET location;
403 404 405 406 407 408 409 410 411 412

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So all we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

B
Bruce Momjian 已提交
413
	ShmemPIDLookup(MyProcPid, &location);
414 415 416 417 418
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

419 420 421
	Assert(proc == MyProc || pid != MyProcPid);

	MyProc = NULL;
422 423 424 425 426 427

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
M
 
Marc G. Fournier 已提交
428
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);
429

430
#ifdef USER_LOCKS
431

M
 
Marc G. Fournier 已提交
432 433 434 435
	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
436 437
#endif

438 439 440 441
	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
H
Hiroshi Inoue 已提交
442
	GetOffWaitqueue(proc);
443 444

	return;
445 446 447 448
}

/*
 * ProcQueue package: routines for putting processes to sleep
449
 *		and  waking them up
450 451 452 453 454 455 456 457
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
458
#ifdef NOT_USED
459
PROC_QUEUE *
460 461
ProcQueueAlloc(char *name)
{
462 463
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
464
		ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
465 466

	if (!queue)
467
		return NULL;
468 469
	if (!found)
		ProcQueueInit(queue);
470
	return queue;
471
}
472

473
#endif
474 475 476 477 478

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
479
ProcQueueInit(PROC_QUEUE *queue)
480
{
481 482
	SHMQueueInit(&(queue->links));
	queue->size = 0;
483 484 485
}


486 487 488 489
/*
 *	Handling cancel request while waiting for lock
 *
 */
490 491 492
static bool lockWaiting = false;
void
SetWaitingForLock(bool waiting)
493
{
494 495
	if (waiting == lockWaiting)
		return;
496
	lockWaiting = waiting;
497 498
	if (lockWaiting)
	{
499 500 501 502 503 504
		/* The lock was already released ? */
		if (MyProc->links.next == INVALID_OFFSET)
		{
			lockWaiting = false;
			return;
		}
505
		if (QueryCancel)		/* cancel request pending */
506 507 508 509 510 511 512 513
		{
			if (GetOffWaitqueue(MyProc))
			{
				lockWaiting = false;
				elog(ERROR, "Query cancel requested while waiting lock");
			}
		}
	}
514
}
515 516
void
LockWaitCancel(void)
517
{
518 519
	struct itimerval timeval,
				dummy;
520

521 522
	if (!lockWaiting)
		return;
523 524 525 526 527 528 529
	lockWaiting = false;
	/* Deadlock timer off */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	setitimer(ITIMER_REAL, &timeval, &dummy);
	if (GetOffWaitqueue(MyProc))
		elog(ERROR, "Query cancel requested while waiting lock");
}
530 531 532 533 534 535 536 537 538

/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * ASSUME: that no one will fiddle with the queue until after
539
 *		we release the spin lock.
540 541 542 543
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
544
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
545
		  LOCKMETHODCTL *lockctl,
546
		  int token,			/* lockmode */
V
Vadim B. Mikheev 已提交
547
		  LOCK *lock)
548
{
549
	int			i;
V
Vadim B. Mikheev 已提交
550
	SPINLOCK	spinlock = lockctl->masterLock;
551
	PROC	   *proc;
V
Vadim B. Mikheev 已提交
552 553 554 555 556
	int			myMask = (1 << token);
	int			waitMask = lock->waitMask;
	int			aheadHolders[MAX_LOCKMODES];
	bool		selfConflict = (lockctl->conflictTab[token] & myMask),
				prevSame = false;
B
Bruce Momjian 已提交
557 558 559
	bool		deadlock_checked = false;
	struct itimerval timeval,
				dummy;
560

V
Vadim B. Mikheev 已提交
561 562 563
	MyProc->token = token;
	MyProc->waitLock = lock;

B
Bruce Momjian 已提交
564
	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);
565

V
Vadim B. Mikheev 已提交
566 567 568
	/* if we don't conflict with any waiter - be first in queue */
	if (!(lockctl->conflictTab[token] & waitMask))
		goto ins;
569

V
Vadim B. Mikheev 已提交
570 571 572
	for (i = 1; i < MAX_LOCKMODES; i++)
		aheadHolders[i] = lock->activeHolders[i];
	(aheadHolders[token])++;
573

V
Vadim B. Mikheev 已提交
574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593
	for (i = 0; i < waitQueue->size; i++)
	{
		/* am I waiting for him ? */
		if (lockctl->conflictTab[token] & proc->holdLock)
		{
			/* is he waiting for me ? */
			if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			{
				MyProc->errType = STATUS_ERROR;
				elog(NOTICE, DeadLockMessage);
				goto rt;
			}
			/* being waiting for him - go past */
		}
		/* if he waits for me */
		else if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			break;
		/* if conflicting locks requested */
		else if (lockctl->conflictTab[proc->token] & myMask)
		{
B
Bruce Momjian 已提交
594

V
Vadim B. Mikheev 已提交
595
			/*
B
Bruce Momjian 已提交
596 597
			 * If I request non self-conflicting lock and there are others
			 * requesting the same lock just before me - stay here.
V
Vadim B. Mikheev 已提交
598 599 600 601
			 */
			if (!selfConflict && prevSame)
				break;
		}
B
Bruce Momjian 已提交
602

V
Vadim B. Mikheev 已提交
603
		/*
B
Bruce Momjian 已提交
604 605
		 * Last attempt to don't move any more: if we don't conflict with
		 * rest waiters in queue.
V
Vadim B. Mikheev 已提交
606 607 608
		 */
		else if (!(lockctl->conflictTab[token] & waitMask))
			break;
609

V
Vadim B. Mikheev 已提交
610 611 612
		prevSame = (proc->token == token);
		(aheadHolders[proc->token])++;
		if (aheadHolders[proc->token] == lock->holders[proc->token])
B
Bruce Momjian 已提交
613
			waitMask &= ~(1 << proc->token);
V
Vadim B. Mikheev 已提交
614 615
		proc = (PROC *) MAKE_PTR(proc->links.prev);
	}
616

V
Vadim B. Mikheev 已提交
617
ins:;
618 619 620 621 622 623
	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
B
Bruce Momjian 已提交
624
	waitQueue->size++;
625

V
Vadim B. Mikheev 已提交
626
	lock->waitMask |= myMask;
627 628 629
	SpinRelease(spinlock);

	/* --------------
B
Bruce Momjian 已提交
630
	 * We set this so we can wake up periodically and check for a deadlock.
B
Bruce Momjian 已提交
631 632
	 * If a deadlock is detected, the handler releases the processes
	 * semaphore and aborts the current transaction.
B
Bruce Momjian 已提交
633 634 635
	 *
	 * Need to zero out struct to set the interval and the micro seconds fields
	 * to 0.
636 637
	 * --------------
	 */
B
Bruce Momjian 已提交
638
	MemSet(&timeval, 0, sizeof(struct itimerval));
639 640
	timeval.it_value.tv_sec = DeadlockTimeout / 1000;
	timeval.it_value.tv_usec = (DeadlockTimeout % 1000) * 1000;
641

642
	SetWaitingForLock(true);
B
Bruce Momjian 已提交
643 644
	do
	{
645
		MyProc->errType = NO_ERROR;		/* reset flag after deadlock check */
646

B
Bruce Momjian 已提交
647 648 649 650 651
		if (!deadlock_checked)
			if (setitimer(ITIMER_REAL, &timeval, &dummy))
				elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
		deadlock_checked = true;

B
Bruce Momjian 已提交
652 653 654 655 656 657
		/* --------------
		 * if someone wakes us between SpinRelease and IpcSemaphoreLock,
		 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
		 * the semaphore implementation.
		 * --------------
		 */
M
 
Marc G. Fournier 已提交
658 659
		IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum,
						 IpcExclusiveLock);
660 661
	} while (MyProc->errType == STATUS_NOT_FOUND);		/* sleep after deadlock
														 * check */
662
	lockWaiting = false;
663

B
Bruce Momjian 已提交
664 665 666 667 668
	/* ---------------
	 * We were awoken before a timeout - now disable the timer
	 * ---------------
	 */
	timeval.it_value.tv_sec = 0;
669
	timeval.it_value.tv_usec = 0;
B
Bruce Momjian 已提交
670 671 672
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to diable timer for process wakeup");

673 674 675 676 677 678 679
	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

V
Vadim B. Mikheev 已提交
680 681
rt:;

682
#ifdef LOCK_DEBUG
M
 
Marc G. Fournier 已提交
683
	/* Just to get meaningful debug messages from DumpLocks() */
684
	MyProc->waitLock = (LOCK *) NULL;
M
 
Marc G. Fournier 已提交
685 686
#endif

687
	return MyProc->errType;
688 689 690 691 692 693
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
694 695
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
696
 */
B
Bruce Momjian 已提交
697
PROC *
698
ProcWakeup(PROC *proc, int errType)
699
{
700
	PROC	   *retProc;
701 702 703 704 705

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
706
		return (PROC *) NULL;
707 708 709 710 711 712 713 714 715 716 717 718

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum, IpcExclusiveLock);

	return retProc;
719 720 721 722
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
723
 *		released.
724 725
 */
int
726
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
727
{
728
	PROC	   *proc;
V
Vadim B. Mikheev 已提交
729 730
	int			count = 0;
	int			last_locktype = 0;
M
 
Marc G. Fournier 已提交
731 732 733
	int			queue_size = queue->size;

	Assert(queue->size >= 0);
734 735

	if (!queue->size)
736
		return STATUS_NOT_FOUND;
737 738

	proc = (PROC *) MAKE_PTR(queue->links.prev);
M
 
Marc G. Fournier 已提交
739 740
	while ((queue_size--) && (proc))
	{
741

M
 
Marc G. Fournier 已提交
742
		/*
743 744
		 * This proc will conflict as the previous one did, don't even
		 * try.
M
 
Marc G. Fournier 已提交
745 746 747 748 749
		 */
		if (proc->token == last_locktype)
			continue;

		/*
V
Vadim B. Mikheev 已提交
750
		 * Does this proc conflict with locks held by others ?
M
 
Marc G. Fournier 已提交
751 752
		 */
		if (LockResolveConflicts(lockmethod,
753
								 lock,
754
								 proc->token,
M
 
Marc G. Fournier 已提交
755 756 757
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
V
Vadim B. Mikheev 已提交
758 759
			if (count != 0)
				break;
M
 
Marc G. Fournier 已提交
760 761 762
			last_locktype = proc->token;
			continue;
		}
763 764 765 766 767 768 769

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
770
		GrantLock(lock, proc->token);
771 772 773

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
774
		 * returns the next proc in chain.
775 776 777
		 */

		count++;
M
 
Marc G. Fournier 已提交
778 779
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
780
	}
781

M
 
Marc G. Fournier 已提交
782 783
	Assert(queue->size >= 0);

784
	if (count)
785
		return STATUS_OK;
786 787
	else
	{
788
		/* Something is still blocking us.	May have deadlocked. */
789 790 791 792 793
#ifdef LOCK_DEBUG
		if (lock->tag.lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
		{
			elog(DEBUG, "ProcLockWakeup: lock(%lx) can't wake up any process", MAKE_OFFSET(lock));
			if (Debug_deadlocks)
M
 
Marc G. Fournier 已提交
794
			DumpAllLocks();
795
		}
M
 
Marc G. Fournier 已提交
796
#endif
797
		return STATUS_NOT_FOUND;
M
 
Marc G. Fournier 已提交
798
	}
799 800 801
}

void
802
ProcAddLock(SHM_QUEUE *elem)
803
{
804
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
805 806 807
}

/* --------------------
808
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
B
Bruce Momjian 已提交
809 810
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that I'm no longer waiting
811
 * on a lock so that other processes don't try to wake me up and screw
812 813 814
 * up my semaphore.
 * --------------------
 */
815
void
816
HandleDeadLock(SIGNAL_ARGS)
817
{
B
Bruce Momjian 已提交
818
	LOCK	   *mywaitlock;
819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * Damn these names are LONG! -mer
	 * ---------------------
	 */
	if (IpcSemaphoreGetCount(MyProc->sem.semId, MyProc->sem.semNum) ==
		IpcSemaphoreDefaultStartValue)
	{
		UnlockLockTable();
		return;
	}

	/*
	 * you would think this would be unnecessary, but...
	 *
	 * this also means we've been removed already.  in some ports (e.g.,
	 * sparc and aix) the semop(2) implementation is such that we can
	 * actually end up in this handler after someone has removed us from
	 * the queue and bopped the semaphore *but the test above fails to
	 * detect the semaphore update* (presumably something weird having to
	 * do with the order in which the semaphore wakeup signal and SIGALRM
	 * get handled).
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

857 858 859
#ifdef LOCK_DEBUG
    if (Debug_deadlocks)
        DumpAllLocks();
860 861
#endif

B
Bruce Momjian 已提交
862 863
	MyProc->errType = STATUS_NOT_FOUND;
	if (!DeadLockCheck(MyProc, MyProc->waitLock))
B
Bruce Momjian 已提交
864 865 866 867 868 869 870
	{
		UnlockLockTable();
		return;
	}

	mywaitlock = MyProc->waitLock;

871 872 873 874
	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
B
Bruce Momjian 已提交
875
	Assert(mywaitlock->waitProcs.size > 0);
876
	lockWaiting = false;
B
Bruce Momjian 已提交
877
	--mywaitlock->waitProcs.size;
878 879 880 881 882 883 884 885
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the count is right for next time.
	 * I was awoken by a signal, not by someone unlocking my semaphore.
	 * ------------------
	 */
M
 
Marc G. Fournier 已提交
886 887
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum,
					   IpcExclusiveLock);
888 889 890 891 892 893 894 895 896 897 898 899 900 901 902

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();

V
Vadim B. Mikheev 已提交
903
	elog(NOTICE, DeadLockMessage);
904
	return;
905 906 907
}

void
908
ProcReleaseSpins(PROC *proc)
909
{
910
	int			i;
911 912 913 914 915 916 917

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
918
	{
919
		if (proc->sLocks[i])
920
		{
921 922
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
923 924
		}
	}
H
 
Hiroshi Inoue 已提交
925
	AbortBufferIO();
926 927 928
}

/*****************************************************************************
929
 *
930 931 932 933
 *****************************************************************************/

/*
 * ProcGetNewSemKeyAndNum -
934 935 936 937
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set. (If the semaphore set doesn't exist yet,
 *	  IpcSemaphoreCreate will create it. Otherwise, we use the existing
 *	  semaphore set.)
938 939
 */
static void
940
ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum)
941
{
942 943
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
B
Bruce Momjian 已提交
944
	int32		fullmask = (1 << (PROC_NSEMS_PER_SET + 1)) - 1;
945

946 947 948 949
	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */
950

951 952
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
953 954
		int			mask = 1;
		int			j;
955 956

		if (freeSemMap[i] == fullmask)
957
			continue;			/* this set is fully allocated */
958 959 960 961 962 963 964

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
B
Bruce Momjian 已提交
965 966
				 * a free semaphore found. Mark it as allocated. Also set
				 * the bit indicating whole set is allocated.
967
				 */
968
				freeSemMap[i] |= mask + (1 << PROC_NSEMS_PER_SET);
969 970 971 972 973 974 975

				*key = ProcGlobal->currKey + i;
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
976 977
	}

978
	/* if we reach here, all the semaphores are in use. */
979
	elog(ERROR, "InitProc: cannot allocate a free semaphore");
980 981 982 983
}

/*
 * ProcFreeSem -
984
 *	  free up our semaphore in the semaphore set.
985 986 987 988
 */
static void
ProcFreeSem(IpcSemaphoreKey semKey, int semNum)
{
989 990 991
	int			mask;
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
992

993 994 995
	i = semKey - ProcGlobal->currKey;
	mask = ~(1 << semNum);
	freeSemMap[i] &= mask;
996

B
Bruce Momjian 已提交
997 998 999 1000
	/*
	 * Formerly we'd release a semaphore set if it was now completely
	 * unused, but now we keep the semaphores to ensure we won't run out
	 * when starting new backends --- cf. InitProcGlobal.  Note that the
1001 1002 1003
	 * PROC_NSEMS_PER_SET+1'st bit of the freeSemMap entry remains set to
	 * indicate it is still allocated; ProcFreeAllSemaphores() needs that.
	 */
1004 1005 1006 1007
}

/*
 * ProcFreeAllSemaphores -
1008 1009 1010
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
1011
 */
1012
static void
1013 1014
ProcFreeAllSemaphores()
{
1015 1016
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
1017

1018 1019 1020 1021 1022
	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		if (freeSemMap[i] != 0)
			IpcSemaphoreKill(ProcGlobal->currKey + i);
	}
1023
}