/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.68 2000/02/21 02:42:36 inoue Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
 *
 *
 * Interface (a):
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.68 2000/02/21 02:42:36 inoue Exp $
 */
#include <sys/time.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>

#if defined(solaris_sparc) || defined(__CYGWIN__)
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#include "postgres.h"
#include "miscadmin.h"
#include "libpq/pqsignal.h"


#include "storage/ipc.h"
/* In Ultrix and QNX, sem.h must be included after ipc.h */
#include <sys/sem.h>

#include "storage/lmgr.h"
#include "storage/proc.h"
#include "utils/trace.h"

void HandleDeadLock(SIGNAL_ARGS);
static void ProcFreeAllSemaphores(void);

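/*
 * Deadlock check timeout in seconds, settable at run time via pg_options;
 * if it is zero, ProcSleep falls back to the compiled-in
 * DEADLOCK_CHECK_TIMER default.
 */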
#define DeadlockCheckTimer pg_options[OPT_DEADLOCKTIMEOUT]

/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
SPINLOCK	ProcStructLock;

static PROC_HDR *ProcGlobal = NULL;

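/* This backend's own entry in the shared process table */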
PROC	   *MyProc = NULL;

static void ProcKill(int exitStatus, int pid);
static void ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum);
static void ProcFreeSem(IpcSemaphoreKey semKey, int semNum);

static char *DeadLockMessage = "Deadlock detected -- See the lock(l) manual page for a possible cause.";

/*
 * InitProcGlobal -
 *	  initializes the global process table. We put it here so that
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
 */
void
InitProcGlobal(IPCKey key, int maxBackends)
{
	bool		found = false;

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		ProcGlobal->freeProcs = INVALID_OFFSET;
		ProcGlobal->currKey = IPCGetProcessSemaphoreInitKey(key);
		for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
			ProcGlobal->freeSemMap[i] = 0;

		/*
		 * Arrange to delete semas on exit --- set this up now so that we
		 * will clean up if pre-allocation fails...
		 */
		on_shmem_exit(ProcFreeAllSemaphores, NULL);

		/*
		 * Pre-create the semaphores for the first maxBackends processes,
150 151 152
		 * unless we are running as a standalone backend.
		 */
		if (key != PrivateIPCKey)
		{
			for (i = 0;
				 i < (maxBackends + PROC_NSEMS_PER_SET - 1) / PROC_NSEMS_PER_SET;
				 i++)
			{
				IPCKey		semKey = ProcGlobal->currKey + i;
				int			semId;

				semId = IpcSemaphoreCreate(semKey,
										   PROC_NSEMS_PER_SET,
										   IPCProtection,
										   IpcSemaphoreDefaultStartValue,
										   0);
				if (semId < 0)
					elog(FATAL, "InitProcGlobal: IpcSemaphoreCreate failed");
				/* mark this sema set allocated */
				ProcGlobal->freeSemMap[i] = (1 << PROC_NSEMS_PER_SET);
			}
		}
	}
}

/* ------------------------
 * InitProcess -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(IPCKey key)
{
	bool		found = false;
	unsigned long location,
				myOffset;

	SpinAcquire(ProcStructLock);

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", (unsigned) sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(STOP, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
		return;
	}

	/* try to get a proc from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc((unsigned) sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IPCKey		semKey;
		int			semNum;
		int			semId;
		union semun semun;

		ProcGetNewSemKeyAndNum(&semKey, &semNum);

		/*
		 * Note: because of the pre-allocation done in InitProcGlobal,
		 * this call should always attach to an existing semaphore. It
		 * will (try to) create a new group of semaphores only if the
		 * postmaster tries to start more backends than it said it would.
		 */
		semId = IpcSemaphoreCreate(semKey,
								   PROC_NSEMS_PER_SET,
								   IPCProtection,
								   IpcSemaphoreDefaultStartValue,
								   0);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = IpcSemaphoreDefaultStartValue;
		semctl(semId, semNum, SETVAL, semun);

		IpcSemaphoreLock(semId, semNum, IpcExclusiveLock);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
		MyProc->sem.semKey = semKey;
	}
	else
		MyProc->sem.semId = -1;

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

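	/* Fill in the identifying fields of our PROC entry */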
	MyProc->pid = MyProcPid;
	MyProc->databaseId = MyDatabaseId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the shmem index table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
		elog(STOP, "InitProc: ShmemPID table broken");

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	on_shmem_exit(ProcKill, (caddr_t) MyProcPid);
}

/* -----------------------
 * get off the wait queue
 * -----------------------
 */
static void
GetOffWaitqueue(PROC *proc)
{
	LockLockTable();
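	/*
	 * If the proc is still linked into a lock's wait queue, unlink it
	 * and fix up that lock's waiter and holder counts and its wait mask.
	 */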
	if (proc->links.next != INVALID_OFFSET)
	{
		int	lockmode = proc->token;
		Assert(proc->waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--proc->waitLock->waitProcs.size;
		Assert(proc->waitLock->nHolding > 0);
		Assert(proc->waitLock->nHolding > proc->waitLock->nActive);
		--proc->waitLock->nHolding;
		Assert(proc->waitLock->holders[lockmode] > 0);
		--proc->waitLock->holders[lockmode];
		if (proc->waitLock->activeHolders[lockmode] ==
			proc->waitLock->holders[lockmode])
			proc->waitLock->waitMask &= ~(1 << lockmode);
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return;
}
/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
	GetOffWaitqueue(MyProc);
}

/*
 * ProcRemove -
358 359 360 361 362
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process. (We have to do
 *	  this in the postmaster instead of doing an IpcSemaphoreKill on exiting
 *	  the process because the semaphore set is shared among backends and
 *	  we don't want to remove other's semaphores on exit.)
363 364 365 366
 */
bool
ProcRemove(int pid)
{
	SHMEM_OFFSET location;
	PROC	   *proc;

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
		return FALSE;
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semKey, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

	return TRUE;
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held spin locks.
 */
static void
ProcKill(int exitStatus, int pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	Assert(proc == MyProc || pid != MyProcPid);

	MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);

#ifdef USER_LOCKS

	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
	GetOffWaitqueue(proc);

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
 *		and  waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
	ShmemInitStruct(name, (unsigned) sizeof(PROC_QUEUE), &found);

	if (!queue)
		return NULL;
	if (!found)
		ProcQueueInit(queue);
	return queue;
}

#endif

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}



/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * ASSUME: that no one will fiddle with the queue until after
490
 *		we release the spin lock.
491 492 493 494
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
495
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
496
		  LOCKMETHODCTL *lockctl,
497
		  int token,			/* lockmode */
V
499
{
500
	int			i;
V
502
	PROC	   *proc;
V
	int			waitMask = lock->waitMask;
	int			aheadHolders[MAX_LOCKMODES];
	bool		selfConflict = (lockctl->conflictTab[token] & myMask),
				prevSame = false;
	bool		deadlock_checked = false;
	struct itimerval timeval,
				dummy;

	MyProc->token = token;
	MyProc->waitLock = lock;

	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);

	/* if we don't conflict with any waiter - be first in queue */
	if (!(lockctl->conflictTab[token] & waitMask))
		goto ins;

	for (i = 1; i < MAX_LOCKMODES; i++)
		aheadHolders[i] = lock->activeHolders[i];
	(aheadHolders[token])++;

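	/*
	 * Scan the processes already waiting to decide where to insert
	 * ourselves; along the way, catch the simple two-process deadlock
	 * where each of us is waiting for a lock the other holds.
	 */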
	for (i = 0; i < waitQueue->size; i++)
	{
		/* am I waiting for him ? */
		if (lockctl->conflictTab[token] & proc->holdLock)
		{
			/* is he waiting for me ? */
			if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			{
				MyProc->errType = STATUS_ERROR;
				elog(NOTICE, DeadLockMessage);
				goto rt;
			}
			/* I am waiting for him, so go past him in the queue */
		}
		/* if he waits for me */
		else if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			break;
		/* if conflicting locks requested */
		else if (lockctl->conflictTab[proc->token] & myMask)
		{

			/*
			 * If I request a non-self-conflicting lock and there are others
			 * requesting the same lock just before me - stay here.
			 */
			if (!selfConflict && prevSame)
				break;
		}

		/*
		 * One last chance not to move any further: stop here if we don't
		 * conflict with the remaining waiters in the queue.
		 */
		else if (!(lockctl->conflictTab[token] & waitMask))
			break;

		prevSame = (proc->token == token);
		(aheadHolders[proc->token])++;
		if (aheadHolders[proc->token] == lock->holders[proc->token])
			waitMask &= ~(1 << proc->token);
		proc = (PROC *) MAKE_PTR(proc->links.prev);
	}

ins:;
	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
	waitQueue->size++;

	lock->waitMask |= myMask;
	SpinRelease(spinlock);

	/* --------------
	 * We set this so we can wake up periodically and check for a deadlock.
	 * If a deadlock is detected, the handler releases the process's
	 * semaphore and aborts the current transaction.
	 *
	 * Need to zero out struct to set the interval and the microseconds fields
	 * to 0.
	 * --------------
	 */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = \
		(DeadlockCheckTimer ? DeadlockCheckTimer : DEADLOCK_CHECK_TIMER);

	SetLockWaiting(true);
	do
	{
		MyProc->errType = NO_ERROR;		/* reset flag after deadlock check */

		if (!deadlock_checked)
			if (setitimer(ITIMER_REAL, &timeval, &dummy))
				elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
		deadlock_checked = true;

		/* --------------
		 * if someone wakes us between SpinRelease and IpcSemaphoreLock,
		 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
		 * the semaphore implementation.
		 * --------------
		 */
		IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum,
						 IpcExclusiveLock);
	} while (MyProc->errType == STATUS_NOT_FOUND);		/* sleep after deadlock
														 * check */
	SetLockWaiting(false);

	/* ---------------
	 * We were awoken before a timeout - now disable the timer
	 * ---------------
	 */
	timeval.it_value.tv_sec = 0;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");

	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

rt:;

#ifdef LOCK_MGR_DEBUG
	/* Just to get meaningful debug messages from DumpLocks() */
	MyProc->waitLock = (LOCK *) NULL;
#endif

	return MyProc->errType;
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
 */
PROC *
ProcWakeup(PROC *proc, int errType)
{
	PROC	   *retProc;

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
		return (PROC *) NULL;

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum, IpcExclusiveLock);

	return retProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
 *		released.
 */
int
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
{
	PROC	   *proc;
	int			count = 0;
	int			trace_flag;
	int			last_locktype = 0;
	int			queue_size = queue->size;

	Assert(queue->size >= 0);

	if (!queue->size)
		return STATUS_NOT_FOUND;

	proc = (PROC *) MAKE_PTR(queue->links.prev);
	while ((queue_size--) && (proc))
	{

		/*
		 * This proc will conflict as the previous one did, don't even
		 * try.
		 */
		if (proc->token == last_locktype)
			continue;

		/*
		 * Does this proc conflict with locks held by others ?
		 */
		if (LockResolveConflicts(lockmethod,
								 lock,
								 proc->token,
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
			if (count != 0)
				break;
			last_locktype = proc->token;
			continue;
		}

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
		GrantLock(lock, proc->token);

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
		 * returns the next proc in chain.
		 */

		count++;
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
	}

	Assert(queue->size >= 0);

	if (count)
		return STATUS_OK;
	else
	{
		/* Something is still blocking us.	May have deadlocked. */
		trace_flag = (lock->tag.lockmethod == USER_LOCKMETHOD) ? \
			TRACE_USERLOCKS : TRACE_LOCKS;
		TPRINTF(trace_flag,
				"ProcLockWakeup: lock(%x) can't wake up any process",
				MAKE_OFFSET(lock));
#ifdef DEADLOCK_DEBUG
		if (pg_options[trace_flag] >= 2)
			DumpAllLocks();
#endif
		return STATUS_NOT_FOUND;
	}
}

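/*
 * ProcAddLock -- link a lock queue element into MyProc->lockQueue so the
 *		lock can be found and released when this process aborts or exits.
 */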
void
ProcAddLock(SHM_QUEUE *elem)
{
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
}

/* --------------------
 * We only get to this routine if we got SIGALRM after DEADLOCK_CHECK_TIMER
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that I'm no longer waiting
 * on a lock so that other processes don't try to wake me up and screw
 * up my semaphore.
 * --------------------
 */
void
HandleDeadLock(SIGNAL_ARGS)
{
	LOCK	   *mywaitlock;

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * Damn these names are LONG! -mer
	 * ---------------------
	 */
	if (IpcSemaphoreGetCount(MyProc->sem.semId, MyProc->sem.semNum) ==
		IpcSemaphoreDefaultStartValue)
	{
		UnlockLockTable();
		return;
	}

	/*
	 * you would think this would be unnecessary, but...
	 *
	 * this also means we've been removed already.  in some ports (e.g.,
	 * sparc and aix) the semop(2) implementation is such that we can
	 * actually end up in this handler after someone has removed us from
	 * the queue and bopped the semaphore *but the test above fails to
	 * detect the semaphore update* (presumably something weird having to
	 * do with the order in which the semaphore wakeup signal and SIGALRM
	 * get handled).
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

#ifdef DEADLOCK_DEBUG
	DumpAllLocks();
#endif

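	/*
	 * Run the real deadlock check.  If no deadlock is found we just go
	 * back to sleep: errType stays STATUS_NOT_FOUND, so ProcSleep loops.
	 */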
	MyProc->errType = STATUS_NOT_FOUND;
	if (!DeadLockCheck(MyProc, MyProc->waitLock))
	{
		UnlockLockTable();
		return;
	}

	mywaitlock = MyProc->waitLock;

	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
	Assert(mywaitlock->waitProcs.size > 0);
	--mywaitlock->waitProcs.size;
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the count is right for next time.
	 * I was awoken by a signal, not by someone unlocking my semaphore.
	 * ------------------
	 */
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum,
					   IpcExclusiveLock);

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();

	elog(NOTICE, DeadLockMessage);
	return;
}

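/*
 * ProcReleaseSpins -- release any spinlocks recorded as held by the given
 *		proc (MyProc if NULL), then abort any buffer I/O in progress.
 */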
void
ProcReleaseSpins(PROC *proc)
{
	int			i;

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
	{
		if (proc->sLocks[i])
		{
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
		}
	}
	AbortBufferIO();
}

/*****************************************************************************
 *
 *****************************************************************************/

/*
 * ProcGetNewSemKeyAndNum -
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set. (If the semaphore set doesn't exist yet,
 *	  IpcSemaphoreCreate will create it. Otherwise, we use the existing
 *	  semaphore set.)
 */
static void
ProcGetNewSemKeyAndNum(IPCKey *key, int *semNum)
{
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
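	/* all semaphores of a set in use, plus the "set allocated" bit */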
	int32		fullmask = (1 << (PROC_NSEMS_PER_SET + 1)) - 1;

	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */

	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		int			mask = 1;
		int			j;

		if (freeSemMap[i] == fullmask)
			continue;			/* this set is fully allocated */

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * A free semaphore was found.  Mark it as allocated, and also
				 * set the bit indicating that the whole set is allocated.
				 */
				freeSemMap[i] |= mask + (1 << PROC_NSEMS_PER_SET);

				*key = ProcGlobal->currKey + i;
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
	}

	/* if we reach here, all the semaphores are in use. */
	elog(ERROR, "InitProc: cannot allocate a free semaphore");
}

/*
 * ProcFreeSem -
 *	  free up our semaphore in the semaphore set.
 */
static void
ProcFreeSem(IpcSemaphoreKey semKey, int semNum)
{
	int			mask;
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;

	i = semKey - ProcGlobal->currKey;
	mask = ~(1 << semNum);
	freeSemMap[i] &= mask;

	/*
	 * Formerly we'd release a semaphore set if it was now completely
	 * unused, but now we keep the semaphores to ensure we won't run out
	 * when starting new backends --- cf. InitProcGlobal.  Note that the
	 * PROC_NSEMS_PER_SET+1'st bit of the freeSemMap entry remains set to
	 * indicate it is still allocated; ProcFreeAllSemaphores() needs that.
	 */
}

/*
 * ProcFreeAllSemaphores -
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
 */
static void
ProcFreeAllSemaphores()
{
	int			i;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;

	for (i = 0; i < MAX_PROC_SEMS / PROC_NSEMS_PER_SET; i++)
	{
		if (freeSemMap[i] != 0)
			IpcSemaphoreKill(ProcGlobal->currKey + i);
	}
}