/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.86 2000/12/11 16:35:59 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 *	Each postgres backend gets one of these.  We'll use it to
 *	clean up after the process should the process suddenly die.
 *
 *
 * Interface (a):
 *		ProcSleep(), ProcWakeup(), ProcWakeupNext(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Locking and waiting for buffers can cause the backend to be
 * put to sleep.  Whoever releases the lock, etc. wakes the
 * process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with this process,
 * ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 *
 * 5/15/91 -- removed the buffer pool based lock chain in favor
 *		of a shared memory lock chain.	The write-protection is
 *		more expensive if the lock chain is in the buffer pool.
 *		The only reason I kept the lock chain in the buffer pool
 *		in the first place was to allow the lock table to grow larger
 *		than available shared memory and that isn't going to work
 *		without a lot of unimplemented support anyway.
 *
 * 4/7/95 -- instead of allocating a set of 1 semaphore per process, we
 *		allocate a semaphore from a set of PROC_NSEMS_PER_SET semaphores
 *		shared among backends (we keep a few sets of semaphores around).
 *		This is so that we can support more backends. (system-wide semaphore
 *		sets run out pretty fast.)				  -ay 4/95
 *
 * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.86 2000/12/11 16:35:59 tgl Exp $
 */
#include "postgres.h"

#include <sys/time.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>

#if defined(solaris_sparc) || defined(__CYGWIN__)
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#include "miscadmin.h"

#if defined(__darwin__)
#include "port/darwin/sem.h"
#endif

/* In Ultrix and QNX, sem.h must be included after ipc.h */
#ifdef HAVE_SYS_SEM_H
#include <sys/sem.h>
#endif

#include "storage/proc.h"


void		HandleDeadLock(SIGNAL_ARGS);
static void ProcFreeAllSemaphores(void);
static bool GetOffWaitqueue(PROC *);

int			DeadlockTimeout = 1000;

/* --------------------
 * Spin lock for manipulating the shared process data structure:
 * ProcGlobal.... Adding an extra spin lock seemed like the smallest
 * hack to get around reading and updating this structure in shared
 * memory. -mer 17 July 1991
 * --------------------
 */
SPINLOCK	ProcStructLock;

static PROC_HDR *ProcGlobal = NULL;

PROC	   *MyProc = NULL;

static void ProcKill(int exitStatus, Datum pid);
static void ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum);
static void ProcFreeSem(IpcSemaphoreId semId, int semNum);

/*
 * InitProcGlobal -
 *	  initializes the global process table. We put it here so that
 *	  the postmaster can do this initialization. (ProcFreeAllSemaphores needs
 *	  to read this table on exiting the postmaster. If we have the first
 *	  backend do this, starting up and killing the postmaster without
 *	  starting any backends will be a problem.)
 *
 *	  We also allocate all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
 */
void
InitProcGlobal(int maxBackends)
{
	bool		found = false;

	/* attach to the free list */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);

	/* --------------------
	 * We're the first - initialize.
	 * XXX if found should ever be true, it is a sign of impending doom ...
	 * ought to complain if so?
	 * --------------------
	 */
	if (!found)
	{
		int			i;

		ProcGlobal->freeProcs = INVALID_OFFSET;
		for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
		{
			ProcGlobal->procSemIds[i] = -1;
			ProcGlobal->freeSemMap[i] = 0;
		}

		/*
		 * Arrange to delete semas on exit --- set this up now so that we
		 * will clean up if pre-allocation fails.  We use our own freeproc,
		 * rather than IpcSemaphoreCreate's removeOnExit option, because
		 * we don't want to fill up the on_shmem_exit list with a separate
		 * entry for each semaphore set.
		 */
		on_shmem_exit(ProcFreeAllSemaphores, 0);

		/*
		 * Pre-create the semaphores for the first maxBackends processes.
		 */
		Assert(maxBackends > 0 && maxBackends <= MAXBACKENDS);

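		/*
		 * Allocate enough semaphore sets to cover maxBackends backends:
		 * the loop below creates ceil(maxBackends / PROC_NSEMS_PER_SET)
		 * sets, so every backend can later grab a semaphore of its own.
		 */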
		for (i = 0; i < ((maxBackends-1)/PROC_NSEMS_PER_SET+1); i++)
		{
			IpcSemaphoreId		semId;

			semId = IpcSemaphoreCreate(PROC_NSEMS_PER_SET,
									   IPCProtection,
									   1,
									   false);
			ProcGlobal->procSemIds[i] = semId;
		}
	}
}

/* ------------------------
 * InitProc -- create a per-process data structure for this process
 * used by the lock manager on semaphore queues.
 * ------------------------
 */
void
InitProcess(void)
{
	bool		found = false;
	unsigned long location,
				myOffset;

	SpinAcquire(ProcStructLock);

	/* attach to the ProcGlobal structure */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	if (!found)
	{
		/* this should not happen. InitProcGlobal() is called before this. */
		elog(STOP, "InitProcess: Proc Header uninitialized");
	}

	if (MyProc != NULL)
	{
		SpinRelease(ProcStructLock);
		elog(ERROR, "ProcInit: you already exist");
	}

	/* try to get a proc struct from the free list first */

	myOffset = ProcGlobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PROC *) MAKE_PTR(myOffset);
		ProcGlobal->freeProcs = MyProc->links.next;
	}
	else
	{

		/*
		 * have to allocate one.  We can't use the normal shmem index
		 * table mechanism because the proc structure is stored by PID
		 * instead of by a global name (need to look it up by PID when we
		 * cleanup dead processes).
		 */

		MyProc = (PROC *) ShmemAlloc(sizeof(PROC));
		if (!MyProc)
		{
			SpinRelease(ProcStructLock);
			elog(FATAL, "cannot create new proc: out of memory");
		}

		/* this cannot be initialized until after the buffer pool */
		SHMQueueInit(&(MyProc->lockQueue));
	}

	/*
	 * zero out the spin lock counts and set the sLocks field for
	 * ProcStructLock to 1 as we have acquired this spinlock above but
	 * didn't record it since we didn't have MyProc until now.
	 */
	MemSet(MyProc->sLocks, 0, sizeof(MyProc->sLocks));
	MyProc->sLocks[ProcStructLock] = 1;


	if (IsUnderPostmaster)
	{
		IpcSemaphoreId	semId;
		int				semNum;
		union semun		semun;

		ProcGetNewSemIdAndNum(&semId, &semNum);

		/*
		 * we might be reusing a semaphore that belongs to a dead backend.
		 * So be careful and reinitialize its value here.
		 */
		semun.val = 1;
		semctl(semId, semNum, SETVAL, semun);

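		/*
		 * Lock the semaphore back down to zero, so that the first P()
		 * done in ProcSleep() will block until some other backend wakes
		 * us up.
		 */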
		IpcSemaphoreLock(semId, semNum);
		MyProc->sem.semId = semId;
		MyProc->sem.semNum = semNum;
	}
	else
		MyProc->sem.semId = -1;

	/* ----------------------
	 * Release the lock.
	 * ----------------------
	 */
	SpinRelease(ProcStructLock);

	MyProc->pid = MyProcPid;
	MyProc->databaseId = MyDatabaseId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;

	/* ----------------
	 * Start keeping spin lock stats from here on.	Any botch before
	 * this initialization is forever botched
	 * ----------------
	 */
	MemSet(MyProc->sLocks, 0, MAX_SPINS * sizeof(*MyProc->sLocks));

	/* -------------------------
	 * Install ourselves in the shmem index table.	The name to
	 * use is determined by the OS-assigned process id.  That
	 * allows the cleanup process to find us after any untimely
	 * exit.
	 * -------------------------
	 */
	location = MAKE_OFFSET(MyProc);
	if ((!ShmemPIDLookup(MyProcPid, &location)) || (location != MAKE_OFFSET(MyProc)))
		elog(STOP, "InitProcess: ShmemPID table broken");

	MyProc->errType = NO_ERROR;
	SHMQueueElemInit(&(MyProc->links));

	on_shmem_exit(ProcKill, (Datum) MyProcPid);
}

/* -----------------------
 * get off the wait queue
 * -----------------------
 */
static bool
GetOffWaitqueue(PROC *proc)
{
	bool		getoffed = false;

	LockLockTable();
	if (proc->links.next != INVALID_OFFSET)
	{
		int			lockmode = proc->token;
		LOCK	   *waitLock = proc->waitLock;

		Assert(waitLock);
		Assert(waitLock->waitProcs.size > 0);
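		/*
		 * Back out the bookkeeping that was done when this proc was queued:
		 * shrink the wait queue, drop the pending-holder counts for the
		 * requested lock mode, clear the waitMask bit if nobody else is
		 * waiting for that mode, and then let the remaining waiters retry.
		 */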
		SHMQueueDelete(&(proc->links));
		--waitLock->waitProcs.size;
		Assert(waitLock->nHolding > 0);
		Assert(waitLock->nHolding > proc->waitLock->nActive);
		--waitLock->nHolding;
		Assert(waitLock->holders[lockmode] > 0);
		--waitLock->holders[lockmode];
		if (waitLock->activeHolders[lockmode] == waitLock->holders[lockmode])
			waitLock->waitMask &= ~(1 << lockmode);
		ProcLockWakeup(&(waitLock->waitProcs), LOCK_LOCKMETHOD(*waitLock), waitLock);
		getoffed = true;
	}
	SHMQueueElemInit(&(proc->links));
	UnlockLockTable();

	return getoffed;
}

/*
 * ProcReleaseLocks() -- release all locks associated with this process
 *
 */
void
ProcReleaseLocks()
{
	if (!MyProc)
		return;
	LockReleaseAll(1, &MyProc->lockQueue);
	GetOffWaitqueue(MyProc);
}

/*
 * ProcRemove -
 *	  used by the postmaster to clean up the global tables. This also frees
 *	  up the semaphore used for the lmgr of the process.
 */
bool
ProcRemove(int pid)
{
	SHMEM_OFFSET location;
	PROC	   *proc;

	location = INVALID_OFFSET;

	location = ShmemPIDDestroy(pid);
	if (location == INVALID_OFFSET)
		return FALSE;
	proc = (PROC *) MAKE_PTR(location);

	SpinAcquire(ProcStructLock);

	ProcFreeSem(proc->sem.semId, proc->sem.semNum);

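	/* Return the PROC struct to the head of the free list for reuse. */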
	proc->links.next = ProcGlobal->freeProcs;
	ProcGlobal->freeProcs = MAKE_OFFSET(proc);

	SpinRelease(ProcStructLock);

	return TRUE;
}

/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held spin locks.
 */
static void
ProcKill(int exitStatus, Datum pid)
{
	PROC	   *proc;
	SHMEM_OFFSET location;

	/* --------------------
	 * If this is a FATAL exit the postmaster will have to kill all the
	 * existing backends and reinitialize shared memory.  So we don't
	 * need to do anything here.
	 * --------------------
	 */
	if (exitStatus != 0)
		return;

	ShmemPIDLookup(MyProcPid, &location);
	if (location == INVALID_OFFSET)
		return;

	proc = (PROC *) MAKE_PTR(location);

	Assert(proc == MyProc || (int) pid != MyProcPid);

	MyProc = NULL;

	/* ---------------
	 * Assume one lock table.
	 * ---------------
	 */
	ProcReleaseSpins(proc);
	LockReleaseAll(DEFAULT_LOCKMETHOD, &proc->lockQueue);

#ifdef USER_LOCKS

	/*
	 * Assume we have a second lock table.
	 */
	LockReleaseAll(USER_LOCKMETHOD, &proc->lockQueue);
#endif

	/* ----------------
	 * get off the wait queue
	 * ----------------
	 */
	GetOffWaitqueue(proc);

	return;
}

/*
 * ProcQueue package: routines for putting processes to sleep
 *		and waking them up
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue = (PROC_QUEUE *)
		ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);

	if (!queue)
		return NULL;
	if (!found)
		ProcQueueInit(queue);
	return queue;
}

#endif

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}


/*
 *	Handling cancel request while waiting for lock
 *
 */
static bool lockWaiting = false;

void
SetWaitingForLock(bool waiting)
{
	if (waiting == lockWaiting)
		return;
	lockWaiting = waiting;
	if (lockWaiting)
	{
		/* The lock was already released ? */
		if (MyProc->links.next == INVALID_OFFSET)
		{
			lockWaiting = false;
			return;
		}
		if (QueryCancel)		/* cancel request pending */
		{
			if (GetOffWaitqueue(MyProc))
			{
				lockWaiting = false;
				elog(ERROR, "Query cancel requested while waiting lock");
			}
		}
	}
}

void
LockWaitCancel(void)
{
#ifndef __BEOS__
	struct itimerval timeval,
				dummy;

	if (!lockWaiting)
		return;
	lockWaiting = false;
	/* Deadlock timer off */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	setitimer(ITIMER_REAL, &timeval, &dummy);
#else
	/* BeOS doesn't have setitimer, but has set_alarm */
	if (!lockWaiting)
		return;
	lockWaiting = false;
	/* Deadlock timer off */
	set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM);
#endif	 /* __BEOS__ */

	if (GetOffWaitqueue(MyProc))
		elog(ERROR, "Query cancel requested while waiting lock");
}

/*
 * ProcSleep -- put a process to sleep
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is cleared by default, so the first time we try
 * to acquire it, we sleep.
 *
 * Result is NO_ERROR if we acquired the lock, STATUS_ERROR if not (deadlock).
 *
 * ASSUME: that no one will fiddle with the queue until after
 *		we release the spin lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
ProcSleep(PROC_QUEUE *waitQueue,/* lock->waitProcs */
		  LOCKMETHODCTL *lockctl,
		  int token,			/* lockmode */
		  LOCK *lock)
{
	int			i;
	SPINLOCK	spinlock = lockctl->masterLock;
	PROC	   *proc;
	int			myMask = (1 << token);
	int			waitMask = lock->waitMask;
	int			aheadHolders[MAX_LOCKMODES];
	bool		selfConflict = (lockctl->conflictTab[token] & myMask),
				prevSame = false;
#ifndef __BEOS__
	struct itimerval timeval,
				dummy;
#else
	bigtime_t	time_interval;
#endif

	MyProc->token = token;
	MyProc->waitLock = lock;

	proc = (PROC *) MAKE_PTR(waitQueue->links.prev);

	/* if we don't conflict with any waiter - be first in queue */
	if (!(lockctl->conflictTab[token] & waitMask))
		goto ins;

	for (i = 1; i < MAX_LOCKMODES; i++)
		aheadHolders[i] = lock->activeHolders[i];
	(aheadHolders[token])++;

	for (i = 0; i < waitQueue->size; i++)
	{
		/* am I waiting for him ? */
		if (lockctl->conflictTab[token] & proc->holdLock)
		{
			/* is he waiting for me ? */
			if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			{
				/* Yes, report deadlock failure */
				MyProc->errType = STATUS_ERROR;
				goto rt;
			}
			/* I'm waiting for him, so go past him */
		}
		/* if he waits for me */
		else if (lockctl->conflictTab[proc->token] & MyProc->holdLock)
			break;
		/* if conflicting locks requested */
		else if (lockctl->conflictTab[proc->token] & myMask)
		{

			/*
			 * If I request a non self-conflicting lock and there are others
			 * requesting the same lock just before me - stay here.
			 */
			if (!selfConflict && prevSame)
				break;
		}

		/*
		 * Stop moving any further back if we don't conflict with the
		 * remaining waiters in the queue.
		 */
		else if (!(lockctl->conflictTab[token] & waitMask))
			break;

		prevSame = (proc->token == token);
		(aheadHolders[proc->token])++;
		if (aheadHolders[proc->token] == lock->holders[proc->token])
			waitMask &= ~(1 << proc->token);
		proc = (PROC *) MAKE_PTR(proc->links.prev);
	}

ins:;
	/* -------------------
	 * assume that these two operations are atomic (because
	 * of the spinlock).
	 * -------------------
	 */
	SHMQueueInsertTL(&(proc->links), &(MyProc->links));
	waitQueue->size++;

	lock->waitMask |= myMask;
	SpinRelease(spinlock);

	MyProc->errType = NO_ERROR;		/* initialize result for success */

	/* --------------
	 * Set timer so we can wake up after awhile and check for a deadlock.
	 * If a deadlock is detected, the handler releases the process's
	 * semaphore and sets MyProc->errType = STATUS_ERROR, allowing us to
	 * know that we must report failure rather than success.
	 *
	 * By delaying the check until we've waited for a bit, we can avoid
	 * running the rather expensive deadlock-check code in most cases.
	 *
	 * Need to zero out the struct to set the interval and the microsecond
	 * fields to 0.
	 * --------------
	 */
#ifndef __BEOS__
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = DeadlockTimeout / 1000;
	timeval.it_value.tv_usec = (DeadlockTimeout % 1000) * 1000;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#else
	time_interval = DeadlockTimeout * 1000000;	/* usecs */
	if (set_alarm(time_interval, B_ONE_SHOT_RELATIVE_ALARM) < 0)
		elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#endif

	SetWaitingForLock(true);

	/* --------------
	 * If someone wakes us between SpinRelease and IpcSemaphoreLock,
	 * IpcSemaphoreLock will not block.  The wakeup is "saved" by
	 * the semaphore implementation.  Note also that if HandleDeadLock
	 * is invoked but does not detect a deadlock, IpcSemaphoreLock()
	 * will continue to wait.  There used to be a loop here, but it
	 * was useless code...
	 * --------------
	 */
	IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum);

	lockWaiting = false;

	/* ---------------
	 * Disable the timer, if it's still running
	 * ---------------
	 */
#ifndef __BEOS__
	timeval.it_value.tv_sec = 0;
	timeval.it_value.tv_usec = 0;
	if (setitimer(ITIMER_REAL, &timeval, &dummy))
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#else
	if (set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM) < 0)
		elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#endif

	/* ----------------
	 * We were assumed to be in a critical section when we went
	 * to sleep.
	 * ----------------
	 */
	SpinAcquire(spinlock);

rt:;

#ifdef LOCK_DEBUG
	/* Just to get meaningful debug messages from DumpLocks() */
	MyProc->waitLock = (LOCK *) NULL;
#endif

	return MyProc->errType;
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 *	 remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
 */
PROC *
ProcWakeup(PROC *proc, int errType)
{
	PROC	   *retProc;

	/* assume that spinlock has been acquired */

	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
		return (PROC *) NULL;

	retProc = (PROC *) MAKE_PTR(proc->links.prev);

	/* you have to update waitLock->waitProcs.size yourself */
	SHMQueueDelete(&(proc->links));
	SHMQueueElemInit(&(proc->links));

	proc->errType = errType;

	IpcSemaphoreUnlock(proc->sem.semId, proc->sem.semNum);

	return retProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
 *		released.
 */
int
ProcLockWakeup(PROC_QUEUE *queue, LOCKMETHOD lockmethod, LOCK *lock)
{
	PROC	   *proc;
	int			count = 0;
	int			last_locktype = 0;
	int			queue_size = queue->size;

	Assert(queue->size >= 0);

	if (!queue->size)
		return STATUS_NOT_FOUND;

	proc = (PROC *) MAKE_PTR(queue->links.prev);
	while ((queue_size--) && (proc))
	{

		/*
		 * This proc will conflict as the previous one did, don't even
		 * try.
		 */
		if (proc->token == last_locktype)
			continue;

		/*
		 * Does this proc conflict with locks held by others ?
		 */
		if (LockResolveConflicts(lockmethod,
								 lock,
								 proc->token,
								 proc->xid,
								 (XIDLookupEnt *) NULL) != STATUS_OK)
		{
			if (count != 0)
				break;
			last_locktype = proc->token;
			continue;
		}

		/*
		 * there was a waiting process, grant it the lock before waking it
		 * up.	This will prevent another process from seizing the lock
		 * between the time we release the lock master (spinlock) and the
		 * time that the awoken process begins executing again.
		 */
		GrantLock(lock, proc->token);

		/*
		 * ProcWakeup removes proc from the lock waiting process queue and
		 * returns the next proc in chain.
		 */

		count++;
		queue->size--;
		proc = ProcWakeup(proc, NO_ERROR);
	}

	Assert(queue->size >= 0);

	if (count)
		return STATUS_OK;
	else
	{
		/* Something is still blocking us.	May have deadlocked. */
#ifdef LOCK_DEBUG
		if (lock->tag.lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
		{
			elog(DEBUG, "ProcLockWakeup: lock(%lx) can't wake up any process", MAKE_OFFSET(lock));
			if (Debug_deadlocks)
				DumpAllLocks();
		}
#endif
		return STATUS_NOT_FOUND;
	}
}

void
ProcAddLock(SHM_QUEUE *elem)
{
	SHMQueueInsertTL(&MyProc->lockQueue, elem);
}

/* --------------------
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
 * while waiting for a lock to be released by some other process.  If we have
 * a real deadlock, we must also indicate that I'm no longer waiting
 * on a lock so that other processes don't try to wake me up and screw
 * up my semaphore.
 * --------------------
 */
void
HandleDeadLock(SIGNAL_ARGS)
{
	LOCK	   *mywaitlock;

	LockLockTable();

	/* ---------------------
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to
	 * us so we know that we don't have to wait anymore.
	 *
	 * We check by looking to see if we've been unlinked from the wait queue.
	 * This is quicker than checking our semaphore's state, since no kernel
	 * call is needed, and it is safe because we hold the locktable lock.
	 * ---------------------
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
	{
		UnlockLockTable();
		return;
	}

#ifdef LOCK_DEBUG
	if (Debug_deadlocks)
		DumpAllLocks();
#endif

	if (!DeadLockCheck(MyProc, MyProc->waitLock))
	{
		/* No deadlock, so keep waiting */
		UnlockLockTable();
		return;
	}

	/* ------------------------
	 * Get this process off the lock's wait queue
	 * ------------------------
	 */
	mywaitlock = MyProc->waitLock;
	Assert(mywaitlock->waitProcs.size > 0);
	lockWaiting = false;
	--mywaitlock->waitProcs.size;
	SHMQueueDelete(&(MyProc->links));
	SHMQueueElemInit(&(MyProc->links));

	/* ------------------
	 * Unlock my semaphore so that the interrupted ProcSleep() call can finish.
	 * ------------------
	 */
	IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum);

	/* -------------
	 * Set MyProc->errType to STATUS_ERROR so that we abort after
	 * returning from this handler.
	 * -------------
	 */
	MyProc->errType = STATUS_ERROR;

	/*
	 * if this doesn't follow the IpcSemaphoreUnlock then we get lock
	 * table corruption ("LockReplace: xid table corrupted") due to race
	 * conditions.	i don't claim to understand this...
	 */
	UnlockLockTable();
}

void
ProcReleaseSpins(PROC *proc)
{
	int			i;

	if (!proc)
		proc = MyProc;

	if (!proc)
		return;
	for (i = 0; i < (int) MAX_SPINS; i++)
	{
		if (proc->sLocks[i])
		{
			Assert(proc->sLocks[i] == 1);
			SpinRelease(i);
		}
	}
	AbortBufferIO();
}

/*****************************************************************************
 *
 *****************************************************************************/

/*
 * ProcGetNewSemIdAndNum -
 *	  scan the free semaphore bitmap and allocate a single semaphore from
 *	  a semaphore set.
 */
static void
ProcGetNewSemIdAndNum(IpcSemaphoreId *semId, int *semNum)
{
	int			i;
	IpcSemaphoreId *procSemIds = ProcGlobal->procSemIds;
	int32	   *freeSemMap = ProcGlobal->freeSemMap;
	int32		fullmask = (1 << PROC_NSEMS_PER_SET) - 1;

	/*
	 * we hold ProcStructLock when entering this routine. We scan through
	 * the bitmap to look for a free semaphore.
	 */
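	/*
	 * Each freeSemMap[] entry covers one semaphore set: bit j is set when
	 * semaphore j of that set is in use, so a fully allocated set compares
	 * equal to fullmask.
	 */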

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		int			mask = 1;
		int			j;

		if (freeSemMap[i] == fullmask)
			continue;			/* this set is fully allocated */
		if (procSemIds[i] < 0)
			continue;			/* this set hasn't been initialized */

		for (j = 0; j < PROC_NSEMS_PER_SET; j++)
		{
			if ((freeSemMap[i] & mask) == 0)
			{

				/*
				 * a free semaphore found. Mark it as allocated.
				 */
				freeSemMap[i] |= mask;

				*semId = procSemIds[i];
				*semNum = j;
				return;
			}
			mask <<= 1;
		}
	}

	/* if we reach here, all the semaphores are in use. */
	elog(ERROR, "ProcGetNewSemIdAndNum: cannot allocate a free semaphore");
}

/*
 * ProcFreeSem -
 *	  free up our semaphore in the semaphore set.
 */
static void
ProcFreeSem(IpcSemaphoreId semId, int semNum)
{
	int32		mask;
	int			i;

	mask = ~(1 << semNum);

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		if (ProcGlobal->procSemIds[i] == semId)
		{
			ProcGlobal->freeSemMap[i] &= mask;
			return;
		}
	}
	fprintf(stderr, "ProcFreeSem: no ProcGlobal entry for semId %d\n", semId);
}

/*
 * ProcFreeAllSemaphores -
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated to the lmgrs of the backends.
 */
static void
ProcFreeAllSemaphores(void)
{
	int			i;

	for (i = 0; i < PROC_SEM_MAP_ENTRIES; i++)
	{
		if (ProcGlobal->procSemIds[i] >= 0)
			IpcSemaphoreKill(ProcGlobal->procSemIds[i]);
	}
}