/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.84 2000/11/28 23:27:56 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
 *
 *
 * Interface (a):
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.84 2000/11/28 23:27:56 tgl Exp $
 */
#include "postgres.h"

#include <sys/time.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>

#if defined(solaris_sparc) || defined(__CYGWIN__)
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#include "miscadmin.h"


/* In Ultrix and QNX, sem.h must be included after ipc.h */
#ifdef HAVE_SYS_SEM_H
#include <sys/sem.h>
#endif

#include "storage/proc.h"

void		HandleDeadLock(SIGNAL_ARGS);
static void ProcFreeAllSemaphores(void);
static bool GetOffWaitqueue(PROC *);

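/* Milliseconds to wait on a lock before checking for deadlock (see ProcSleep) */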
int DeadlockTimeout = 1000;

/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
SPINLOCK	ProcStructLock;

static PROC_HDR *ProcGlobal = NULL;

PROC	   *MyProc = NULL;

static void ProcKill(int exitStatus, Datum pid);
static void ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum);
static void ProcFreeSem(IpcSemaphoreId semId, int semNum);

/*
 * InitProcGlobal -
 *	  initializes the global process table. We put it here so that
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
 */
void
InitProcGlobal(int maxBackends)
{
	bool		found = false;

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		ProcGlobal->freeProcs = INVALID_OFFSET;
		for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
		{
			ProcGlobal->procSemIds[i] = -1;
			ProcGlobal->freeSemMap[i] = 0;
		}

		/*
		 * Arrange to delete semas on exit --- set this up now so that we
		 * will clean up if pre-allocation fails.  We use our own freeproc,
		 * rather than IpcSemaphoreCreate's removeOnExit option, because
		 * we don't want to fill up the on_shmem_exit list with a separate
		 * entry for each semaphore set.
		 */
		on_shmem_exit(ProcFreeAllSemaphores, 0);

		/*
		 * Pre-create the semaphores for the first maxBackends processes.
		 */
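		/*
		 * Note: one semaphore set serves PROC_NSEMS_PER_SET backends, so we
		 * create ceil(maxBackends / PROC_NSEMS_PER_SET) sets here.
		 */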
		Assert(maxBackends > 0 && maxBackends <= MAXBACKENDS);

		for (i = 0; i < ((maxBackends-1)/PROC_NSEMS_PER_SET+1); i++)
		{
			IpcSemaphoreId		semId;

			semId = IpcSemaphoreCreate(PROC_NSEMS_PER_SET,
									   IPCProtection,
									   1,
									   false);
			ProcGlobal->procSemIds[i] = semId;
		}
	}
}

/* ------------------------
 * InitProcess -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(void)
{
	bool		found = false;
	unsigned long location,
				myOffset;

	SpinAcquire(ProcStructLock);

	/* attach to the ProcGlobal structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(STOP, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
	}

	/* try to get a proc struct from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc(sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IpcSemaphoreId	semId;
		int				semNum;
		union semun		semun;

		ProcGetNewSemIdAndNum(&semId, &semNum);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = 1;
		semctl(semId, semNum, SETVAL, semun);

		IpcSemaphoreLock(semId, semNum);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
	}
	else
		MyProc->sem.semId = -1;

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = MyProcPid;
	MyProc->databaseId = MyDatabaseId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the shmem index table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
		elog(STOP, "InitProcess: ShmemPID table broken");

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	on_shmem_exit(ProcKill, (Datum) MyProcPid);
}

/* -----------------------
 * GetOffWaitqueue -- remove the given process from the wait queue of the
 *		lock it was waiting on, fix up the lock's counts, and wake up any
 *		waiters that can now proceed.  Returns true if the process was
 *		actually removed from a wait queue.
 * -----------------------
 */
static bool
GetOffWaitqueue(PROC *proc)
{
	bool		getoffed = false;

	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		int			lockmode = proc->token;
		LOCK	*waitLock = proc->waitLock;

		Assert(waitLock);
		Assert(waitLock->waitProcs.size > 0);
		SHMQueueDelete(&(proc->links));
		--waitLock->waitProcs.size;
		Assert(waitLock->nHolding > 0);
		Assert(waitLock->nHolding > proc->waitLock->nActive);
		--waitLock->nHolding;
		Assert(waitLock->holders[lockmode] > 0);
		--waitLock->holders[lockmode];
		if (waitLock->activeHolders[lockmode] == waitLock->holders[lockmode])
			waitLock->waitMask &= ~(1 << lockmode);
		ProcLockWakeup(&(waitLock->waitProcs), LOCK_LOCKMETHOD(*waitLock), waitLock);
		getoffed = true;
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return getoffed;
}

/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
	GetOffWaitqueue(MyProc);
}

/*
 * ProcRemove -
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process.
 */
bool
ProcRemove(int pid)
{
	SHMEM_OFFSET location;
	PROC	   *proc;

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
		return FALSE;
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semId, proc->sem.semNum);

	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

	return TRUE;
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held spin locks.
 */
static void
ProcKill(int exitStatus, Datum pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	Assert(proc == MyProc || (int)pid != MyProcPid);

	MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);

#ifdef USER_LOCKS

	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
	GetOffWaitqueue(proc);

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
 *		and  waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
		ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);

	if (!queue)
		return NULL;
	if (!found)
		ProcQueueInit(queue);
	return queue;
}

#endif

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}


/*
 *	Handling cancel request while waiting for lock
 *
 */
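/* true while we are waiting in ProcSleep() for a lock to be granted */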
static bool lockWaiting = false;
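/*
 * SetWaitingForLock -- mark whether we are waiting for a lock.
 *
 * When starting to wait, check for a pending cancel request: if the lock
 * has not already been granted, get off the wait queue and abort the query.
 */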
void
SetWaitingForLock(bool waiting)
{
	if (waiting == lockWaiting)
		return;
	lockWaiting = waiting;
	if (lockWaiting)
	{
		/* The lock was already released ? */
		if (MyProc->links.next == INVALID_OFFSET)
		{
			lockWaiting = false;
			return;
		}
		if (QueryCancel)		/* cancel request pending */
		{
			if (GetOffWaitqueue(MyProc))
			{
				lockWaiting = false;
				elog(ERROR, "Query cancel requested while waiting lock");
			}
		}
	}
}

void
LockWaitCancel(void)
{
#ifndef __BEOS__
	struct itimerval timeval,
				dummy;

	if (!lockWaiting)
		return;
	lockWaiting = false;
	/* Deadlock timer off */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	setitimer(ITIMER_REAL, &timeval, &dummy);
#else
	/* BeOS doesn't have setitimer, but has set_alarm */
	if (!lockWaiting)
		return;
	lockWaiting = false;
	/* Deadlock timer off */
    set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM);
#endif /* __BEOS__ */

	if (GetOffWaitqueue(MyProc))
		elog(ERROR, "Query cancel requested while waiting lock");
}

/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * Result is NO_ERROR if we acquired the lock, STATUS_ERROR if not (deadlock).
 *
 * ASSUME: that no one will fiddle with the queue until after
 *		we release the spin lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
		  LOCKMETHODCTL *lockctl,
		  int token,			/* lockmode */
		  LOCK *lock)
{
	int			i;
	SPINLOCK	spinlock = lockctl->masterLock;
	PROC	   *proc;
	int			myMask = (1 << token);
	int			waitMask = lock->waitMask;
	int			aheadHolders[MAX_LOCKMODES];
	bool		selfConflict = (lockctl->conflictTab[token] & myMask),
				prevSame = false;
#ifndef __BEOS__
	struct itimerval timeval,
				dummy;
#else
    bigtime_t time_interval;
#endif

	MyProc->token = token;
	MyProc->waitLock = lock;

	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);

	/* if we don't conflict with any waiter - be first in queue */
	if (!(lockctl->conflictTab[token] & waitMask))
		goto ins;

	for (i = 1; i < MAX_LOCKMODES; i++)
		aheadHolders[i] = lock->activeHolders[i];
	(aheadHolders[token])++;

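	/*
	 * Scan the wait queue to decide where to insert ourselves, checking
	 * each waiter we pass for a simple two-way deadlock (we wait for him
	 * and he waits for us).
	 */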
	for (i = 0; i < waitQueue->size; i++)
	{
		/* am I waiting for him ? */
		if (lockctl->conflictTab[token] & proc->holdLock)
		{
			/* is he waiting for me ? */
			if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			{
				/* Yes, report deadlock failure */
				MyProc->errType = STATUS_ERROR;
				goto rt;
			}
			/* I'm waiting for him - go past */
		}
		/* if he waits for me */
		else if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			break;
		/* if conflicting locks requested */
		else if (lockctl->conflictTab[proc->token] & myMask)
		{

			/*
			 * If I request a non-self-conflicting lock and there are others
			 * requesting the same lock just before me - stay here.
			 */
			if (!selfConflict && prevSame)
				break;
		}

		/*
		 * Last chance to stop moving: if we don't conflict with the rest of
		 * the waiters in the queue, stay here.
		 */
		else if (!(lockctl->conflictTab[token] & waitMask))
			break;

		prevSame = (proc->token == token);
		(aheadHolders[proc->token])++;
		if (aheadHolders[proc->token] == lock->holders[proc->token])
			waitMask &= ~(1 << proc->token);
		proc = (PROC *) MAKE_PTR(proc->links.prev);
	}

ins:;
	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
	waitQueue->size++;

	lock->waitMask |= myMask;
	SpinRelease(spinlock);

	MyProc->errType = NO_ERROR;		/* initialize result for success */

	/* --------------
	 * Set timer so we can wake up after awhile and check for a deadlock.
	 * If a deadlock is detected, the handler releases the process's
	 * semaphore and sets MyProc->errType = STATUS_ERROR, allowing us to
	 * know that we must report failure rather than success.
	 *
	 * By delaying the check until we've waited for a bit, we can avoid
	 * running the rather expensive deadlock-check code in most cases.
	 *
	 * Need to zero out the struct to set the interval and the microseconds
	 * fields to 0.
	 * --------------
	 */
#ifndef __BEOS__
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = DeadlockTimeout / 1000;
	timeval.it_value.tv_usec = (DeadlockTimeout % 1000) * 1000;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#else
    time_interval = DeadlockTimeout * 1000000; /* usecs */
	if (set_alarm(time_interval, B_ONE_SHOT_RELATIVE_ALARM) < 0)
		elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#endif

	SetWaitingForLock(true);

	/* --------------
	 * If someone wakes us between SpinRelease and IpcSemaphoreLock,
	 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
	 * the semaphore implementation.  Note also that if HandleDeadLock
	 * is invoked but does not detect a deadlock, IpcSemaphoreLock()
	 * will continue to wait.  There used to be a loop here, but it
	 * was useless code...
	 * --------------
	 */
	IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum);

	lockWaiting = false;

	/* ---------------
	 * Disable the timer, if it's still running
	 * ---------------
	 */
#ifndef __BEOS__
	timeval.it_value.tv_sec = 0;
	timeval.it_value.tv_usec = 0;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#else
    if (set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM) < 0)
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#endif

	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

rt:;

#ifdef LOCK_DEBUG
	/* Just to get meaningful debug messages from DumpLocks() */
	MyProc->waitLock = (LOCK *) NULL;
#endif

	return MyProc->errType;
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
 */
PROC *
ProcWakeup(PROC *proc, int errType)
{
	PROC	   *retProc;

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
		return (PROC *) NULL;

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);

	return retProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
 *		released.
 */
int
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
{
	PROC	   *proc;
	int			count = 0;
	int			last_locktype = 0;
	int			queue_size = queue->size;

	Assert(queue->size >= 0);

	if (!queue->size)
		return STATUS_NOT_FOUND;

	proc = (PROC *) MAKE_PTR(queue->links.prev);
	while ((queue_size--) && (proc))
	{

		/*
		 * If this proc requests the same lock type as the previous one, it
		 * will conflict just as that one did -- don't even try.
		 */
		if (proc->token == last_locktype)
			continue;

		/*
		 * Does this proc conflict with locks held by others ?
		 */
		if (LockResolveConflicts(lockmethod,
								 lock,
								 proc->token,
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
			if (count != 0)
				break;
			last_locktype = proc->token;
			continue;
		}

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
		GrantLock(lock, proc->token);

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
		 * returns the next proc in chain.
		 */

		count++;
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
	}

	Assert(queue->size >= 0);

	if (count)
		return STATUS_OK;
	else
	{
		/* Something is still blocking us.	May have deadlocked. */
#ifdef LOCK_DEBUG
		if (lock->tag.lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
		{
			elog(DEBUG, "ProcLockWakeup: lock(%lx) can't wake up any process", MAKE_OFFSET(lock));
			if (Debug_deadlocks)
				DumpAllLocks();
		}
#endif
		return STATUS_NOT_FOUND;
	}
}

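/*
 * ProcAddLock -- add a lock queue element to this backend's chain of
 *		held locks (MyProc->lockQueue).
 */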
void
ProcAddLock(SHM_QUEUE *elem)
{
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
}

/* --------------------
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
 * while waiting for a lock to be released by some other process.  If we
 * have a real deadlock, we must also indicate that we're no longer waiting
 * on a lock, so that other processes don't try to wake us up and screw
 * up our semaphore.
 * --------------------
 */
void
HandleDeadLock(SIGNAL_ARGS)
{
	LOCK	   *mywaitlock;

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * We check by looking to see if we've been unlinked from the wait queue.
	 * This is quicker than checking our semaphore's state, since no kernel
	 * call is needed, and it is safe because we hold the locktable lock.
	 * ---------------------
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

848 849 850
#ifdef LOCK_DEBUG
    if (Debug_deadlocks)
        DumpAllLocks();
851 852
#endif

B
	if (!DeadLockCheck(MyProc, MyProc->waitLock))
	{
		/* No deadlock, so keep waiting */
		return;
	}

860 861 862 863
	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
864
	mywaitlock = MyProc->waitLock;
B
Bruce Momjian 已提交
865
	Assert(mywaitlock->waitProcs.size > 0);
866
	lockWaiting = false;
B
Bruce Momjian 已提交
867
	--mywaitlock->waitProcs.size;
868 869 870 871
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
872
	 * Unlock my semaphore so that the interrupted ProcSleep() call can finish.
873 874
	 * ------------------
	 */
875
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum);
876 877 878 879 880 881 882 883 884 885 886 887 888 889

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();
}

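/*
 * ProcReleaseSpins -- release all spinlocks recorded as held by the given
 *		process (or by MyProc if proc is NULL), then abort any buffer I/O
 *		this backend had in progress.
 */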
void
ProcReleaseSpins(PROC *proc)
{
	int			i;

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
	{
		if (proc->sLocks[i])
		{
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
		}
	}
	AbortBufferIO();
}

/*****************************************************************************
 *
 *****************************************************************************/

/*
 * ProcGetNewSemIdAndNum -
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set.
 */
static void
ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum)
{
	int			i;
	IpcSemaphoreId *procSemIds = ProcGlobal->procSemIds;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
	int32		fullmask = (1 << PROC_NSEMS_PER_SET) - 1;

	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		int			mask = 1;
		int			j;

		if (freeSemMap[i] == fullmask)
			continue;			/* this set is fully allocated */
		if (procSemIds[i] < 0)
			continue;			/* this set hasn't been initialized */

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * a free semaphore found. Mark it as allocated.
				 */
				freeSemMap[i] |= mask;

				*semId = procSemIds[i];
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
	}

	/* if we reach here, all the semaphores are in use. */
	elog(ERROR, "ProcGetNewSemIdAndNum: cannot allocate a free semaphore");
}

/*
 * ProcFreeSem -
 *	  free up our semaphore in the semaphore set.
 */
static void
ProcFreeSem(IpcSemaphoreId semId, int semNum)
{
	int32		mask;
	int			i;

	mask = ~(1 << semNum);

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		if (ProcGlobal->procSemIds[i] == semId)
		{
			ProcGlobal->freeSemMap[i] &= mask;
			return;
		}
	}
	fprintf(stderr, "ProcFreeSem: no ProcGlobal entry for semId %d\n", semId);
}

/*
 * ProcFreeAllSemaphores -
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
 */
static void
ProcFreeAllSemaphores(void)
{
	int			i;

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		if (ProcGlobal->procSemIds[i] >= 0)
			IpcSemaphoreKill(ProcGlobal->procSemIds[i]);
	}
}