/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.196 2007/10/26 20:45:10 alvherre Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * Interface (a):
 *		ProcSleep(), ProcWakeup(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with current transaction
 *
 * ProcKill -- destroys the shared memory state (and locks)
 * associated with the process.
 */
32 33
#include "postgres.h"

34
#include <signal.h>
35 36
#include <unistd.h>
#include <sys/time.h>
M
Marc G. Fournier 已提交
37

38
#include "access/transam.h"
39
#include "access/xact.h"
40
#include "miscadmin.h"
41
#include "postmaster/autovacuum.h"
42
#include "storage/ipc.h"
43
#include "storage/lmgr.h"
44
#include "storage/proc.h"
45
#include "storage/procarray.h"
46
#include "storage/spin.h"
47

48

49
/* GUC variables */
B
Bruce Momjian 已提交
50
int			DeadlockTimeout = 1000;
51
int			StatementTimeout = 0;
52
bool		log_lock_waits = false;
M
 
Marc G. Fournier 已提交
53

54
/* Pointer to this process's PGPROC struct, if any */
J
Jan Wieck 已提交
55
PGPROC	   *MyProc = NULL;
56 57

/*
J
Jan Wieck 已提交
58
 * This spinlock protects the freelist of recycled PGPROC structures.
59
 * We cannot use an LWLock because the LWLock manager depends on already
J
Jan Wieck 已提交
60
 * having a PGPROC and a wait semaphore!  But these structures are touched
61 62
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
63
 */
64
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
65

66
/* Pointers to shared-memory structures */
67
NON_EXEC_STATIC PROC_HDR *ProcGlobal = NULL;
68
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
69

70 71
/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;
72

73 74 75
/* Mark these volatile because they can be changed by signal handler */
static volatile bool statement_timeout_active = false;
static volatile bool deadlock_timeout_active = false;
76
static volatile DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
77
volatile bool cancel_from_timeout = false;
B
Bruce Momjian 已提交
78

79 80 81
/* timeout_start_time is set when log_lock_waits is true */
static TimestampTz timeout_start_time;

82
/* statement_fin_time is valid only if statement_timeout_active is true */
83
static TimestampTz statement_fin_time;
84 85


86
static void RemoveProcFromArray(int code, Datum arg);
87
static void ProcKill(int code, Datum arg);
88
static void AuxiliaryProcKill(int code, Datum arg);
89
static bool CheckStatementTimeout(void);
90

V
Vadim B. Mikheev 已提交
91

92 93 94
/*
 * Report shared-memory space needed by InitProcGlobal.
 */
95
Size
96
ProcGlobalShmemSize(void)
97
{
98 99 100 101
	Size		size = 0;

	/* ProcGlobal */
	size = add_size(size, sizeof(PROC_HDR));
102 103
	/* AuxiliaryProcs */
	size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
104
	/* MyProcs, including autovacuum */
105 106 107
	size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
	/* ProcStructLock */
	size = add_size(size, sizeof(slock_t));
108 109 110 111

	return size;
}

112 113 114 115
/*
 * Report number of semaphores needed by InitProcGlobal.
 */
int
116
ProcGlobalSemas(void)
117
{
118 119 120 121
	/*
	 * We need a sema per backend (including autovacuum), plus one for each
	 * auxiliary process.
	 */
122
	return MaxBackends + NUM_AUXILIARY_PROCS;
123 124
}

125 126
/*
 * InitProcGlobal -
127 128
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
129
 *
130
 *	  We also create all the per-process semaphores we will need to support
131 132 133 134 135 136 137
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
138 139
 *	  MaxConnections or autovacuum_max_workers higher than his kernel will
 *	  support, he'll find out sooner rather than later.
140 141 142 143
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
144 145
 *
 * Note: this is NOT called by individual backends under a postmaster,
146
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
147
 * pointers must be propagated specially for EXEC_BACKEND operation.
148 149
 */
void
150
InitProcGlobal(void)
151
{
152 153 154
	PGPROC	   *procs;
	int			i;
	bool		found;
155

156
	/* Create the ProcGlobal shared structure */
157
	ProcGlobal = (PROC_HDR *)
158 159
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);
160

161
	/*
162 163
	 * Create the PGPROC structures for auxiliary (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
164
	 */
165 166
	AuxiliaryProcs = (PGPROC *)
		ShmemInitStruct("AuxiliaryProcs", NUM_AUXILIARY_PROCS * sizeof(PGPROC),
167 168
						&found);
	Assert(!found);
169

170 171 172 173
	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->freeProcs = INVALID_OFFSET;
174
	ProcGlobal->autovacFreeProcs = INVALID_OFFSET;
175

176
	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
177

178 179 180
	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 */
181
	procs = (PGPROC *) ShmemAlloc((MaxConnections) * sizeof(PGPROC));
182 183 184 185
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
186 187
	MemSet(procs, 0, MaxConnections * sizeof(PGPROC));
	for (i = 0; i < MaxConnections; i++)
188 189 190 191 192
	{
		PGSemaphoreCreate(&(procs[i].sem));
		procs[i].links.next = ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = MAKE_OFFSET(&procs[i]);
	}
193

194 195 196 197 198 199 200 201 202 203 204 205 206
	procs = (PGPROC *) ShmemAlloc((autovacuum_max_workers) * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, autovacuum_max_workers * sizeof(PGPROC));
	for (i = 0; i < autovacuum_max_workers; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));
		procs[i].links.next = ProcGlobal->autovacFreeProcs;
		ProcGlobal->autovacFreeProcs = MAKE_OFFSET(&procs[i]);
	}

207 208
	MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
209
	{
210 211
		AuxiliaryProcs[i].pid = 0;	/* marks auxiliary proc as not in use */
		PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
212
	}
213 214 215 216

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
217 218
}

219
/*
 * InitProcess -- initialize a per-process data structure for this backend
 *
 * Pops a PGPROC off the appropriate shared freelist (regular or autovacuum),
 * fills in its fields, and registers ProcKill to give it back at backend
 * exit.  Raises FATAL if no PGPROC is available (i.e. too many backends).
 */
void
InitProcess(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	SHMEM_OFFSET myOffset;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Try to get a proc struct from the free list.  If this fails, we must be
	 * out of PGPROC structures (not to mention semaphores).
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(procglobal->spins_per_delay);

	/* Autovacuum workers draw from their own reserved freelist */
	if (IsAutoVacuumWorkerProcess())
		myOffset = procglobal->autovacFreeProcs;
	else
		myOffset = procglobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		MyProc = (PGPROC *) MAKE_PTR(myOffset);
		/* unlink the PGPROC we took from the head of its freelist */
		if (IsAutoVacuumWorkerProcess())
			procglobal->autovacFreeProcs = MyProc->links.next;
		else
			procglobal->freeProcs = MyProc->links.next;
		SpinLockRelease(ProcStructLock);
	}
	else
	{
		/*
		 * If we reach here, all the PGPROCs are in use.  This is one of the
		 * possible places to detect "too many backends", so give the standard
		 * error message.  XXX do we need to give a different failure message
		 * in the autovacuum case?
		 */
		SpinLockRelease(ProcStructLock);
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("sorry, too many clients already")));
	}

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->lxid = InvalidLocalTransactionId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->pid = MyProcPid;
	/* backendId, databaseId and roleId will be filled in later */
	MyProc->backendId = InvalidBackendId;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->inCommit = false;
	MyProc->vacuumFlags = 0;
	if (IsAutoVacuumWorkerProcess())
		MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM;
	/* not waiting on any lightweight lock yet */
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	/* not waiting on any heavyweight lock yet, either */
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.	(This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/*
	 * Arrange to clean up at backend exit.  ProcKill returns our PGPROC to
	 * the freelist we just took it from.
	 */
	on_shmem_exit(ProcKill, 0);

	/*
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
	 */
	InitDeadLockChecking();
}

324 325 326 327 328 329 330 331 332 333 334 335 336
/*
 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
 *
 * This is separate from InitProcess because we can't acquire LWLocks until
 * we've created a PGPROC, but in the EXEC_BACKEND case there is a good deal
 * of stuff to be done before this step that will require LWLock access.
 *
 * Requires MyProc to be set (i.e. InitProcess already ran) and MyDatabaseId
 * to be known.
 */
void
InitProcessPhase2(void)
{
	Assert(MyProc != NULL);

	/*
	 * We should now know what database we're in, so advertise that.  (We need
	 * not do any locking here, since no other backend can yet see our
	 * PGPROC.)
	 */
	Assert(OidIsValid(MyDatabaseId));
	MyProc->databaseId = MyDatabaseId;

	/*
	 * Add our PGPROC to the PGPROC array in shared memory.
	 */
	ProcArrayAdd(MyProc);

	/*
	 * Arrange to clean that up at backend exit.  (Registered after
	 * ProcArrayAdd so the entry exists when the callback runs.)
	 */
	on_shmem_exit(RemoveProcFromArray, 0);
}

355
/*
 * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
 *
 * This is called by bgwriter and similar processes so that they will have a
 * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
 * and sema that are assigned are one of the extra ones created during
 * InitProcGlobal.
 *
 * Auxiliary processes are presently not expected to wait for real (lockmgr)
 * locks, so we need not set up the deadlock checker.  They are never added
 * to the ProcArray or the sinval messaging mechanism, either.  They also
 * don't get a VXID assigned, since this is only useful when we actually
 * hold lockmgr locks.
 */
void
InitAuxiliaryProcess(void)
{
	PGPROC	   *auxproc;
	int			proctype;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * We use the ProcStructLock to protect assignment and releasing of
	 * AuxiliaryProcs entries.
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(ProcGlobal->spins_per_delay);

	/*
	 * Find a free auxproc ... *big* trouble if there isn't one ...
	 * (pid == 0 marks a slot as unused; see InitProcGlobal.)
	 */
	for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
	{
		auxproc = &AuxiliaryProcs[proctype];
		if (auxproc->pid == 0)
			break;
	}
	if (proctype >= NUM_AUXILIARY_PROCS)
	{
		SpinLockRelease(ProcStructLock);
		elog(FATAL, "all AuxiliaryProcs are in use");
	}

	/* Mark auxiliary proc as in use by me */
	/* use volatile pointer to prevent code rearrangement */
	((volatile PGPROC *) auxproc)->pid = MyProcPid;

	MyProc = auxproc;

	SpinLockRelease(ProcStructLock);

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->lxid = InvalidLocalTransactionId;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->backendId = InvalidBackendId;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->inCommit = false;
	/* we don't set the "is autovacuum" flag in the launcher */
	MyProc->vacuumFlags = 0;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.	(This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/*
	 * Arrange to clean up at process exit.  AuxiliaryProcKill frees this
	 * slot (identified by proctype) rather than returning it to a freelist.
	 */
	on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
}

456 457 458 459 460 461 462 463 464 465
/*
 * Check whether there are at least N free PGPROC objects.
 *
 * Note: this is designed on the assumption that N will generally be small.
 */
bool
HaveNFreeProcs(int n)
{
	SHMEM_OFFSET offset;
	PGPROC	   *proc;
B
Bruce Momjian 已提交
466

467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	SpinLockAcquire(ProcStructLock);

	offset = procglobal->freeProcs;

	while (n > 0 && offset != INVALID_OFFSET)
	{
		proc = (PGPROC *) MAKE_PTR(offset);
		offset = proc->links.next;
		n--;
	}

	SpinLockRelease(ProcStructLock);

	return (n <= 0);
}

486 487 488
/*
 * Cancel any pending wait for lock, when aborting a transaction.
 *
 * Returns true if we had been waiting for a lock, else false.
 *
 * (Normally, this would only happen if we accept a cancel/die
 * interrupt while waiting; but an ereport(ERROR) while waiting is
 * within the realm of possibility, too.)
 */
bool
LockWaitCancel(void)
{
	LWLockId	partitionLock;

	/* Nothing to do if we weren't waiting for a lock */
	if (lockAwaited == NULL)
		return false;

	/* Turn off the deadlock timer, if it's still running (see ProcSleep) */
	disable_sig_alarm(false);

	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/* links.next is valid only while we remain queued on the lock's wait list */
	if (MyProc->links.next != INVALID_OFFSET)
	{
		/* We could not have been granted the lock yet */
		RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
	}
	else
	{
		/*
		 * Somebody kicked us off the lock queue already.  Perhaps they
		 * granted us the lock, or perhaps they detected a deadlock. If they
		 * did grant us the lock, we'd better remember it in our local lock
		 * table.
		 */
		if (MyProc->waitStatus == STATUS_OK)
			GrantAwaitedLock();
	}

	/* no longer waiting for any lock */
	lockAwaited = NULL;

	LWLockRelease(partitionLock);

	/*
	 * We used to do PGSemaphoreReset() here to ensure that our proc's wait
	 * semaphore is reset to zero.	This prevented a leftover wakeup signal
	 * from remaining in the semaphore if someone else had granted us the lock
	 * we wanted before we were able to remove ourselves from the wait-list.
	 * However, now that ProcSleep loops until waitStatus changes, a leftover
	 * wakeup signal isn't harmful, and it seems not worth expending cycles to
	 * get rid of a signal that most likely isn't there.
	 */

	/*
	 * Return true even if we were kicked off the lock before we were able to
	 * remove ourselves.
	 */
	return true;
}
548

549

550
/*
 * ProcReleaseLocks() -- release locks associated with current transaction
 *			at main transaction commit or abort
 *
 * At main transaction commit, we release all locks except session locks.
 * At main transaction abort, we release all locks including session locks;
 * this lets us clean up after a VACUUM FULL failure.
 *
 * At subtransaction commit, we don't release any locks (so this func is not
 * needed at all); we will defer the releasing to the parent transaction.
 * At subtransaction abort, we release all locks held by the subtransaction;
 * this is implemented by retail releasing of the locks under control of
 * the ResourceOwner mechanism.
 *
 * Note that user locks are not released in any case.
 */
void
ProcReleaseLocks(bool isCommit)
{
	/* With no PGPROC we hold no lockmgr locks; nothing to release */
	if (!MyProc)
		return;
	/* If waiting, get off wait queue (should only be needed after error) */
	LockWaitCancel();
	/* Release locks; !isCommit means release session locks too */
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
}


578 579 580 581 582 583 584
/*
 * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
 *
 * on_shmem_exit callback registered by InitProcessPhase2; the code and arg
 * parameters are unused.
 */
static void
RemoveProcFromArray(int code, Datum arg)
{
	Assert(MyProc != NULL);
	ProcArrayRemove(MyProc, InvalidTransactionId);
}

588 589
/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held LW locks.
 *
 * on_shmem_exit callback registered by InitProcess; the code and arg
 * parameters are unused.  Returns the PGPROC to the freelist it was
 * drawn from (regular or autovacuum).
 */
static void
ProcKill(int code, Datum arg)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	Assert(MyProc != NULL);

	/*
	 * Release any LW locks I am holding.  There really shouldn't be any, but
	 * it's cheap to check again before we cut the knees off the LWLock
	 * facility by releasing our PGPROC ...
	 */
	LWLockReleaseAll();

	SpinLockAcquire(ProcStructLock);

	/* Return PGPROC structure (and semaphore) to freelist */
	if (IsAutoVacuumWorkerProcess())
	{
		MyProc->links.next = procglobal->autovacFreeProcs;
		procglobal->autovacFreeProcs = MAKE_OFFSET(MyProc);
	}
	else
	{
		MyProc->links.next = procglobal->freeProcs;
		procglobal->freeProcs = MAKE_OFFSET(MyProc);
	}

	/* PGPROC struct isn't mine anymore */
	MyProc = NULL;

	/* Update shared estimate of spins_per_delay */
	procglobal->spins_per_delay = update_spins_per_delay(procglobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);

	/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
	if (AutovacuumLauncherPid != 0)
		kill(AutovacuumLauncherPid, SIGUSR1);
}

/*
 * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
 *		processes (bgwriter, etc).	The PGPROC and sema are not released, only
 *		marked as not-in-use.
 *
 * on_shmem_exit callback registered by InitAuxiliaryProcess; arg carries
 * the index of our AuxiliaryProcs slot.
 */
static void
AuxiliaryProcKill(int code, Datum arg)
{
	int			proctype = DatumGetInt32(arg);
	PGPROC	   *auxproc;

	Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);

	auxproc = &AuxiliaryProcs[proctype];

	Assert(MyProc == auxproc);

	/* Release any LW locks I am holding (see notes above) */
	LWLockReleaseAll();

	SpinLockAcquire(ProcStructLock);

	/* Mark auxiliary proc no longer in use (pid == 0 means free slot) */
	MyProc->pid = 0;

	/* PGPROC struct isn't mine anymore */
	MyProc = NULL;

	/* Update shared estimate of spins_per_delay */
	ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);
}

668

669 670
/*
 * ProcQueue package: routines for putting processes to sleep
671
 *		and  waking them up
672 673 674 675 676 677 678 679
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue;

	/* Attach to (or create) the named shared-memory structure */
	queue = (PROC_QUEUE *) ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
	if (queue == NULL)
		return NULL;

	/* Whoever creates the structure initializes it */
	if (!found)
		ProcQueueInit(queue);

	return queue;
}
#endif
695 696 697 698 699

/*
 * ProcQueueInit -- initialize a shared memory process queue
 *
 * Empties the linked list and zeroes the waiter count.
 */
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}


/*
708
 * ProcSleep -- put a process to sleep on the specified lock
709
 *
710 711
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process (under all XIDs).
712
 *
713
 * The lock table's partition lock must be held at entry, and will be held
714
 * at exit.
715
 *
716
 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
717
 *
718
 * ASSUME: that no one will fiddle with the queue until after
719
 *		we release the partition lock.
720 721
 *
 * NOTES: The process queue is now a priority queue for locking.
722 723 724
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is normally zero, so when we try to acquire it, we sleep.
725 726
 */
int
727
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
728
{
729 730 731
	LOCKMODE	lockmode = locallock->tag.mode;
	LOCK	   *lock = locallock->lock;
	PROCLOCK   *proclock = locallock->proclock;
732 733
	uint32		hashcode = locallock->hashcode;
	LWLockId	partitionLock = LockHashPartitionLock(hashcode);
734
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
735
	LOCKMASK	myHeldLocks = MyProc->heldLocks;
736
	bool		early_deadlock = false;
737
	bool 		allow_autovacuum_cancel = true;
738
	int			myWaitStatus;
J
Jan Wieck 已提交
739
	PGPROC	   *proc;
740
	int			i;
741

742
	/*
743 744
	 * Determine where to add myself in the wait queue.
	 *
745 746 747 748
	 * Normally I should go at the end of the queue.  However, if I already
	 * hold locks that conflict with the request of any previous waiter, put
	 * myself in the queue just in front of the first such waiter. This is not
	 * a necessary step, since deadlock detection would move me to before that
B
Bruce Momjian 已提交
749 750
	 * waiter anyway; but it's relatively cheap to detect such a conflict
	 * immediately, and avoid delaying till deadlock timeout.
751
	 *
752 753
	 * Special case: if I find I should go in front of some waiter, check to
	 * see if I conflict with already-held locks or the requests before that
B
Bruce Momjian 已提交
754 755 756 757
	 * waiter.	If not, then just grant myself the requested lock immediately.
	 * This is the same as the test for immediate grant in LockAcquire, except
	 * we are only considering the part of the wait queue before my insertion
	 * point.
758 759
	 */
	if (myHeldLocks != 0)
V
Vadim B. Mikheev 已提交
760
	{
761
		LOCKMASK	aheadRequests = 0;
762

J
Jan Wieck 已提交
763
		proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
764
		for (i = 0; i < waitQueue->size; i++)
V
Vadim B. Mikheev 已提交
765
		{
766
			/* Must he wait for me? */
B
Bruce Momjian 已提交
767
			if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
V
Vadim B. Mikheev 已提交
768
			{
769
				/* Must I wait for him ? */
B
Bruce Momjian 已提交
770
				if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
771
				{
772
					/*
B
Bruce Momjian 已提交
773 774 775 776 777
					 * Yes, so we have a deadlock.	Easiest way to clean up
					 * correctly is to call RemoveFromWaitQueue(), but we
					 * can't do that until we are *on* the wait queue. So, set
					 * a flag to check below, and break out of loop.  Also,
					 * record deadlock info for later message.
778
					 */
779
					RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
780 781
					early_deadlock = true;
					break;
782
				}
783
				/* I must go before this waiter.  Check special case. */
B
Bruce Momjian 已提交
784
				if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
785 786 787
					LockCheckConflicts(lockMethodTable,
									   lockmode,
									   lock,
788
									   proclock,
789
									   MyProc) == STATUS_OK)
790
				{
791
					/* Skip the wait and just grant myself the lock. */
792
					GrantLock(lock, proclock, lockmode);
793
					GrantAwaitedLock();
794
					return STATUS_OK;
795 796
				}
				/* Break out of loop to put myself before him */
V
Vadim B. Mikheev 已提交
797
				break;
798
			}
799
			/* Nope, so advance to next waiter */
800
			aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
J
Jan Wieck 已提交
801
			proc = (PGPROC *) MAKE_PTR(proc->links.next);
V
Vadim B. Mikheev 已提交
802
		}
B
Bruce Momjian 已提交
803

804
		/*
B
Bruce Momjian 已提交
805 806
		 * If we fall out of loop normally, proc points to waitQueue head, so
		 * we will insert at tail of queue as desired.
807
		 */
808 809 810 811
	}
	else
	{
		/* I hold no locks, so I can't push in front of anyone. */
J
Jan Wieck 已提交
812
		proc = (PGPROC *) &(waitQueue->links);
V
Vadim B. Mikheev 已提交
813
	}
814

815
	/*
B
Bruce Momjian 已提交
816
	 * Insert self into queue, ahead of the given proc (or at tail of queue).
817
	 */
818
	SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
B
Bruce Momjian 已提交
819
	waitQueue->size++;
820

821
	lock->waitMask |= LOCKBIT_ON(lockmode);
822

J
Jan Wieck 已提交
823
	/* Set up wait information in PGPROC object, too */
824
	MyProc->waitLock = lock;
825
	MyProc->waitProcLock = proclock;
826 827
	MyProc->waitLockMode = lockmode;

828
	MyProc->waitStatus = STATUS_WAITING;
829 830

	/*
B
Bruce Momjian 已提交
831 832 833
	 * If we detected deadlock, give up without waiting.  This must agree with
	 * CheckDeadLock's recovery code, except that we shouldn't release the
	 * semaphore since we haven't tried to lock it yet.
834 835 836
	 */
	if (early_deadlock)
	{
837
		RemoveFromWaitQueue(MyProc, hashcode);
838 839
		return STATUS_ERROR;
	}
840

841
	/* mark that we are waiting for a lock */
842
	lockAwaited = locallock;
843

844
	/*
845
	 * Release the lock table's partition lock.
846
	 *
847
	 * NOTE: this may also cause us to exit critical-section state, possibly
B
Bruce Momjian 已提交
848 849
	 * allowing a cancel/die interrupt to be accepted. This is OK because we
	 * have recorded the fact that we are waiting for a lock, and so
850
	 * LockWaitCancel will clean up if cancel/die happens.
851
	 */
852
	LWLockRelease(partitionLock);
853

854 855 856
	/* Reset deadlock_state before enabling the signal handler */
	deadlock_state = DS_NOT_YET_CHECKED;

857
	/*
B
Bruce Momjian 已提交
858 859 860 861
	 * Set timer so we can wake up after awhile and check for a deadlock. If a
	 * deadlock is detected, the handler releases the process's semaphore and
	 * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
	 * must report failure rather than success.
862
	 *
863 864
	 * By delaying the check until we've waited for a bit, we can avoid
	 * running the rather expensive deadlock-check code in most cases.
865
	 */
866
	if (!enable_sig_alarm(DeadlockTimeout, false))
867
		elog(FATAL, "could not set timer for process wakeup");
868

869
	/*
870
	 * If someone wakes us between LWLockRelease and PGSemaphoreLock,
B
Bruce Momjian 已提交
871
	 * PGSemaphoreLock will not block.	The wakeup is "saved" by the semaphore
B
Bruce Momjian 已提交
872 873 874 875 876
	 * implementation.	While this is normally good, there are cases where a
	 * saved wakeup might be leftover from a previous operation (for example,
	 * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
	 * So, loop to wait again if the waitStatus shows we haven't been granted
	 * nor denied the lock yet.
877
	 *
878 879 880 881 882 883 884
	 * We pass interruptOK = true, which eliminates a window in which
	 * cancel/die interrupts would be held off undesirably.  This is a promise
	 * that we don't mind losing control to a cancel/die interrupt here.  We
	 * don't, because we have no shared-state-change work to do after being
	 * granted the lock (the grantor did it all).  We do have to worry about
	 * updating the locallock table, but if we lose control to an error,
	 * LockWaitCancel will fix that up.
885
	 */
B
Bruce Momjian 已提交
886 887
	do
	{
888
		PGSemaphoreLock(&MyProc->sem, true);
889

890 891 892 893 894 895 896
		/*
		 * waitStatus could change from STATUS_WAITING to something else
		 * asynchronously.  Read it just once per loop to prevent surprising
		 * behavior (such as missing log messages).
		 */
		myWaitStatus = MyProc->waitStatus;

897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938
		/*
		 * If we are not deadlocked, but are waiting on an autovacuum-induced
		 * task, send a signal to interrupt it.  
		 */
		if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
		{
			PGPROC	*autovac = GetBlockingAutoVacuumPgproc();

			LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

			/*
			 * Only do it if the worker is not working to protect against Xid
			 * wraparound.
			 */
			if ((autovac != NULL) &&
				(autovac->vacuumFlags & PROC_IS_AUTOVACUUM) &&
				!(autovac->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
			{
				int		pid = autovac->pid;

				elog(DEBUG2, "sending cancel to blocking autovacuum pid = %d",
					 pid);

				/* don't hold the lock across the kill() syscall */
				LWLockRelease(ProcArrayLock);

				/* send the autovacuum worker Back to Old Kent Road */
				if (kill(pid, SIGINT) < 0)
				{
					/* Just a warning to allow multiple callers */
					ereport(WARNING,
							(errmsg("could not send signal to process %d: %m",
									pid)));
				}
			}
			else
				LWLockRelease(ProcArrayLock);

			/* prevent signal from being resent more than once */
			allow_autovacuum_cancel = false;
		}

939 940 941 942
		/*
		 * If awoken after the deadlock check interrupt has run, and
		 * log_lock_waits is on, then report about the wait.
		 */
943
		if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
944
		{
945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964
			StringInfoData buf;
			const char *modename;
			long		secs;
			int			usecs;
			long		msecs;

			initStringInfo(&buf);
			DescribeLockTag(&buf, &locallock->tag.lock);
			modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
									   lockmode);
			TimestampDifference(timeout_start_time, GetCurrentTimestamp(),
								&secs, &usecs);
			msecs = secs * 1000 + usecs / 1000;
			usecs = usecs % 1000;

			if (deadlock_state == DS_SOFT_DEADLOCK)
				ereport(LOG,
						(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
								MyProcPid, modename, buf.data, msecs, usecs)));
			else if (deadlock_state == DS_HARD_DEADLOCK)
965
			{
966 967 968 969 970 971 972 973 974 975
				/*
				 * This message is a bit redundant with the error that will
				 * be reported subsequently, but in some cases the error
				 * report might not make it to the log (eg, if it's caught by
				 * an exception handler), and we want to ensure all long-wait
				 * events get logged.
				 */
				ereport(LOG,
						(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
								MyProcPid, modename, buf.data, msecs, usecs)));
976
			}
977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009

			if (myWaitStatus == STATUS_WAITING)
				ereport(LOG,
						(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
								MyProcPid, modename, buf.data, msecs, usecs)));
			else if (myWaitStatus == STATUS_OK)
				ereport(LOG,
						(errmsg("process %d acquired %s on %s after %ld.%03d ms",
								MyProcPid, modename, buf.data, msecs, usecs)));
			else
			{
				Assert(myWaitStatus == STATUS_ERROR);
				/*
				 * Currently, the deadlock checker always kicks its own
				 * process, which means that we'll only see STATUS_ERROR
				 * when deadlock_state == DS_HARD_DEADLOCK, and there's no
				 * need to print redundant messages.  But for completeness
				 * and future-proofing, print a message if it looks like
				 * someone else kicked us off the lock.
				 */
				if (deadlock_state != DS_HARD_DEADLOCK)
					ereport(LOG,
							(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
									MyProcPid, modename, buf.data, msecs, usecs)));
			}

			/*
			 * At this point we might still need to wait for the lock.
			 * Reset state so we don't print the above messages again.
			 */
			deadlock_state = DS_NO_DEADLOCK;

			pfree(buf.data);
1010
		}
1011
	} while (myWaitStatus == STATUS_WAITING);
1012

1013
	/*
1014
	 * Disable the timer, if it's still running
B
Bruce Momjian 已提交
1015
	 */
1016
	if (!disable_sig_alarm(false))
1017
		elog(FATAL, "could not disable timer for process wakeup");
B
Bruce Momjian 已提交
1018

1019
	/*
B
Bruce Momjian 已提交
1020 1021 1022
	 * Re-acquire the lock table's partition lock.  We have to do this to hold
	 * off cancel/die interrupts before we can mess with lockAwaited (else we
	 * might have a missed or duplicated locallock update).
1023
	 */
1024
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1025 1026 1027

	/*
	 * We no longer want LockWaitCancel to do anything.
1028
	 */
1029
	lockAwaited = NULL;
1030

1031
	/*
1032
	 * If we got the lock, be sure to remember it in the locallock table.
1033
	 */
1034
	if (MyProc->waitStatus == STATUS_OK)
1035
		GrantAwaitedLock();
1036

1037 1038 1039 1040
	/*
	 * We don't have to do anything else, because the awaker did all the
	 * necessary update of the lock table and MyProc.
	 */
1041
	return MyProc->waitStatus;
1042 1043 1044 1045 1046 1047
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
1048
 *	 Also remove the process from the wait queue and set its links invalid.
1049
 *	 RETURN: the next process in the wait queue.
1050
 *
1051 1052
 * The appropriate lock partition lock must be held by caller.
 *
1053 1054 1055
 * XXX: presently, this code is only used for the "success" case, and only
 * works correctly for that case.  To clean up in failure case, would need
 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1056
 * Hence, in practice the waitStatus parameter must be STATUS_OK.
1057
 */
J
Jan Wieck 已提交
1058
PGPROC *
1059
ProcWakeup(PGPROC *proc, int waitStatus)
1060
{
J
Jan Wieck 已提交
1061
	PGPROC	   *retProc;
1062

1063
	/* Proc should be sleeping ... */
1064 1065
	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
1066
		return NULL;
1067
	Assert(proc->waitStatus == STATUS_WAITING);
1068

1069
	/* Save next process before we zap the list link */
J
Jan Wieck 已提交
1070
	retProc = (PGPROC *) MAKE_PTR(proc->links.next);
1071

1072
	/* Remove process from wait queue */
1073
	SHMQueueDelete(&(proc->links));
1074
	(proc->waitLock->waitProcs.size)--;
1075

1076 1077
	/* Clean up process' state and pass it the ok/fail signal */
	proc->waitLock = NULL;
1078
	proc->waitProcLock = NULL;
1079
	proc->waitStatus = waitStatus;
1080

1081
	/* And awaken it */
1082
	PGSemaphoreUnlock(&proc->sem);
1083 1084

	return retProc;
1085 1086 1087 1088
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
1089 1090
 *		released (or a prior waiter is aborted).  Scan all waiters
 *		for lock, waken any that are no longer blocked.
1091 1092
 *
 * The appropriate lock partition lock must be held by caller.
1093
 */
1094
void
1095
ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1096
{
1097 1098
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
	int			queue_size = waitQueue->size;
J
Jan Wieck 已提交
1099
	PGPROC	   *proc;
1100
	LOCKMASK	aheadRequests = 0;
M
 
Marc G. Fournier 已提交
1101

1102
	Assert(queue_size >= 0);
1103

1104 1105
	if (queue_size == 0)
		return;
1106

J
Jan Wieck 已提交
1107
	proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
1108

1109 1110
	while (queue_size-- > 0)
	{
B
Bruce Momjian 已提交
1111
		LOCKMODE	lockmode = proc->waitLockMode;
M
 
Marc G. Fournier 已提交
1112 1113

		/*
B
Bruce Momjian 已提交
1114 1115
		 * Waken if (a) doesn't conflict with requests of earlier waiters, and
		 * (b) doesn't conflict with already-held locks.
M
 
Marc G. Fournier 已提交
1116
		 */
B
Bruce Momjian 已提交
1117
		if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1118 1119 1120
			LockCheckConflicts(lockMethodTable,
							   lockmode,
							   lock,
1121
							   proc->waitProcLock,
1122
							   proc) == STATUS_OK)
M
 
Marc G. Fournier 已提交
1123
		{
1124
			/* OK to waken */
1125
			GrantLock(lock, proc->waitProcLock, lockmode);
1126
			proc = ProcWakeup(proc, STATUS_OK);
B
Bruce Momjian 已提交
1127

1128
			/*
B
Bruce Momjian 已提交
1129 1130 1131
			 * ProcWakeup removes proc from the lock's waiting process queue
			 * and returns the next proc in chain; don't use proc's next-link,
			 * because it's been cleared.
1132
			 */
M
 
Marc G. Fournier 已提交
1133
		}
1134
		else
1135
		{
B
Bruce Momjian 已提交
1136
			/*
B
Bruce Momjian 已提交
1137
			 * Cannot wake this guy. Remember his request for later checks.
B
Bruce Momjian 已提交
1138
			 */
1139
			aheadRequests |= LOCKBIT_ON(lockmode);
J
Jan Wieck 已提交
1140
			proc = (PGPROC *) MAKE_PTR(proc->links.next);
1141
		}
M
 
Marc G. Fournier 已提交
1142
	}
1143 1144

	Assert(waitQueue->size >= 0);
1145 1146
}

1147 1148 1149
/*
 * CheckDeadLock
 *
1150
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
1151 1152
 * while waiting for a lock to be released by some other process.  Look
 * to see if there's a deadlock; if not, just return and continue waiting.
1153
 * (But signal ProcSleep to log a message, if log_lock_waits is true.)
1154 1155
 * If we have a real deadlock, remove ourselves from the lock's wait queue
 * and signal an error to ProcSleep.
1156 1157 1158
 *
 * NB: this is run inside a signal handler, so be very wary about what is done
 * here or in called routines.
1159
 */
1160
static void
1161
CheckDeadLock(void)
1162
{
1163 1164
	int			i;

1165
	/*
B
Bruce Momjian 已提交
1166 1167
	 * Acquire exclusive lock on the entire shared lock data structures. Must
	 * grab LWLocks in partition-number order to avoid LWLock deadlock.
1168 1169 1170 1171 1172 1173
	 *
	 * Note that the deadlock check interrupt had better not be enabled
	 * anywhere that this process itself holds lock partition locks, else this
	 * will wait forever.  Also note that LWLockAcquire creates a critical
	 * section, so that this routine cannot be interrupted by cancel/die
	 * interrupts.
1174
	 */
1175 1176
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE);
1177

1178
	/*
1179 1180
	 * Check to see if we've been awoken by anyone in the interim.
	 *
1181 1182 1183
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to us so
	 * we know that we don't have to wait anymore.
1184
	 *
1185
	 * We check by looking to see if we've been unlinked from the wait queue.
B
Bruce Momjian 已提交
1186
	 * This is quicker than checking our semaphore's state, since no kernel
1187
	 * call is needed, and it is safe because we hold the lock partition lock.
1188
	 */
1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
		goto check_done;

#ifdef LOCK_DEBUG
	if (Debug_deadlocks)
		DumpAllLocks();
#endif

	/* Run the deadlock check, and set deadlock_state for use by ProcSleep */
	deadlock_state = DeadLockCheck(MyProc);

1201
	if (deadlock_state == DS_HARD_DEADLOCK)
B
Bruce Momjian 已提交
1202
	{
1203 1204 1205
		/*
		 * Oops.  We have a deadlock.
		 *
1206 1207 1208 1209
		 * Get this process out of wait state. (Note: we could do this more
		 * efficiently by relying on lockAwaited, but use this coding to
		 * preserve the flexibility to kill some other transaction than the
		 * one detecting the deadlock.)
1210 1211
		 *
		 * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
1212 1213
		 * ProcSleep will report an error after we return from the signal
		 * handler.
1214 1215 1216
		 */
		Assert(MyProc->waitLock != NULL);
		RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1217

1218 1219 1220 1221 1222
		/*
		 * Unlock my semaphore so that the interrupted ProcSleep() call can
		 * finish.
		 */
		PGSemaphoreUnlock(&MyProc->sem);
1223

1224
		/*
1225 1226 1227 1228 1229 1230 1231 1232
		 * We're done here.  Transaction abort caused by the error that
		 * ProcSleep will raise will cause any other locks we hold to be
		 * released, thus allowing other processes to wake up; we don't need
		 * to do that here.  NOTE: an exception is that releasing locks we
		 * hold doesn't consider the possibility of waiters that were blocked
		 * behind us on the lock we just failed to get, and might now be
		 * wakable because we're not in front of them anymore.  However,
		 * RemoveFromWaitQueue took care of waking up any such processes.
1233 1234
		 */
	}
1235
	else if (log_lock_waits || deadlock_state == DS_BLOCKED_BY_AUTOVACUUM)
1236 1237 1238 1239 1240 1241
	{
		/*
		 * Unlock my semaphore so that the interrupted ProcSleep() call can
		 * print the log message (we daren't do it here because we are inside
		 * a signal handler).  It will then sleep again until someone
		 * releases the lock.
1242 1243 1244
		 *
		 * If blocked by autovacuum, this wakeup will enable ProcSleep to send
		 * the cancelling signal to the autovacuum worker.
1245 1246 1247
		 */
		PGSemaphoreUnlock(&MyProc->sem);
	}
1248 1249

	/*
1250 1251 1252 1253 1254
	 * And release locks.  We do this in reverse order for two reasons:
	 * (1) Anyone else who needs more than one of the locks will be trying
	 * to lock them in increasing order; we don't want to release the other
	 * process until it can get all the locks it needs.
	 * (2) This avoids O(N^2) behavior inside LWLockRelease.
1255
	 */
1256
check_done:
B
Bruce Momjian 已提交
1257
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1258
		LWLockRelease(FirstLockMgrLock + i);
1259 1260 1261
}


1262 1263 1264 1265 1266 1267
/*
 * ProcWaitForSignal - wait for a signal from another backend.
 *
 * This can share the semaphore normally used for waiting for locks,
 * since a backend could never be waiting for a lock and a signal at
 * the same time.  As with locks, it's OK if the signal arrives just
B
Bruce Momjian 已提交
1268
 * before we actually reach the waiting state.	Also as with locks,
1269 1270
 * it's necessary that the caller be robust against bogus wakeups:
 * always check that the desired state has occurred, and wait again
B
Bruce Momjian 已提交
1271
 * if not.	This copes with possible "leftover" wakeups.
1272 1273 1274 1275
 */
void
ProcWaitForSignal(void)
{
1276
	PGSemaphoreLock(&MyProc->sem, true);
1277 1278 1279
}

/*
1280
 * ProcSendSignal - send a signal to a backend identified by PID
1281 1282
 */
void
1283
ProcSendSignal(int pid)
1284
{
1285
	PGPROC	   *proc = BackendPidGetProc(pid);
1286 1287

	if (proc != NULL)
1288
		PGSemaphoreUnlock(&proc->sem);
1289 1290 1291
}


1292 1293 1294 1295 1296 1297 1298 1299 1300
/*****************************************************************************
 * SIGALRM interrupt support
 *
 * Maybe these should be in pqsignal.c?
 *****************************************************************************/

/*
 * Enable the SIGALRM interrupt to fire after the specified delay
 *
1301
 * Delay is given in milliseconds.	Caller should be sure a SIGALRM
1302 1303
 * signal handler is installed before this is called.
 *
1304 1305
 * This code properly handles nesting of deadlock timeout alarms within
 * statement timeout alarms.
1306
 *
1307 1308 1309
 * Returns TRUE if okay, FALSE on failure.
 */
bool
1310
enable_sig_alarm(int delayms, bool is_statement_timeout)
1311
{
1312
	TimestampTz fin_time;
1313
	struct itimerval timeval;
1314

1315 1316
	if (is_statement_timeout)
	{
1317 1318 1319 1320 1321 1322 1323 1324 1325 1326
		/*
		 * Begin statement-level timeout
		 *
		 * Note that we compute statement_fin_time with reference to the
		 * statement_timestamp, but apply the specified delay without any
		 * correction; that is, we ignore whatever time has elapsed since
		 * statement_timestamp was set.  In the normal case only a small
		 * interval will have elapsed and so this doesn't matter, but there
		 * are corner cases (involving multi-statement query strings with
		 * embedded COMMIT or ROLLBACK) where we might re-initialize the
B
Bruce Momjian 已提交
1327 1328 1329 1330
		 * statement timeout long after initial receipt of the message. In
		 * such cases the enforcement of the statement timeout will be a bit
		 * inconsistent.  This annoyance is judged not worth the cost of
		 * performing an additional gettimeofday() here.
1331
		 */
1332
		Assert(!deadlock_timeout_active);
1333 1334
		fin_time = GetCurrentStatementStartTimestamp();
		fin_time = TimestampTzPlusMilliseconds(fin_time, delayms);
1335
		statement_fin_time = fin_time;
1336
		cancel_from_timeout = false;
1337
		statement_timeout_active = true;
1338 1339 1340 1341 1342 1343
	}
	else if (statement_timeout_active)
	{
		/*
		 * Begin deadlock timeout with statement-level timeout active
		 *
1344 1345 1346 1347
		 * Here, we want to interrupt at the closer of the two timeout times.
		 * If fin_time >= statement_fin_time then we need not touch the
		 * existing timer setting; else set up to interrupt at the deadlock
		 * timeout time.
1348 1349 1350
		 *
		 * NOTE: in this case it is possible that this routine will be
		 * interrupted by the previously-set timer alarm.  This is okay
B
Bruce Momjian 已提交
1351 1352 1353
		 * because the signal handler will do only what it should do according
		 * to the state variables.	The deadlock checker may get run earlier
		 * than normal, but that does no harm.
1354
		 */
1355 1356
		timeout_start_time = GetCurrentTimestamp();
		fin_time = TimestampTzPlusMilliseconds(timeout_start_time, delayms);
1357
		deadlock_timeout_active = true;
1358
		if (fin_time >= statement_fin_time)
1359 1360 1361 1362 1363 1364
			return true;
	}
	else
	{
		/* Begin deadlock timeout with no statement-level timeout */
		deadlock_timeout_active = true;
1365 1366 1367
		/* GetCurrentTimestamp can be expensive, so only do it if we must */
		if (log_lock_waits)
			timeout_start_time = GetCurrentTimestamp();
1368
	}
1369

1370
	/* If we reach here, okay to set the timer interrupt */
1371
	MemSet(&timeval, 0, sizeof(struct itimerval));
1372 1373
	timeval.it_value.tv_sec = delayms / 1000;
	timeval.it_value.tv_usec = (delayms % 1000) * 1000;
1374
	if (setitimer(ITIMER_REAL, &timeval, NULL))
1375
		return false;
1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396
	return true;
}

/*
 * Cancel the SIGALRM timer, either for a deadlock timeout or a statement
 * timeout.  If a deadlock timeout is canceled, any active statement timeout
 * remains in force.
 *
 * Returns TRUE if okay, FALSE on failure.
 */
bool
disable_sig_alarm(bool is_statement_timeout)
{
	/*
	 * Always disable the interrupt if it is active; this avoids being
	 * interrupted by the signal handler and thereby possibly getting
	 * confused.
	 *
	 * We will re-enable the interrupt if necessary in CheckStatementTimeout.
	 */
	if (statement_timeout_active || deadlock_timeout_active)
	{
		struct itimerval timeval;

		MemSet(&timeval, 0, sizeof(struct itimerval));
		if (setitimer(ITIMER_REAL, &timeval, NULL))
		{
			/* Timer disarm failed; clear all state to stay consistent */
			statement_timeout_active = false;
			cancel_from_timeout = false;
			deadlock_timeout_active = false;
			return false;
		}
	}

	/* Always cancel deadlock timeout, in case this is error cleanup */
	deadlock_timeout_active = false;

	/* Cancel or reschedule statement timeout */
	if (is_statement_timeout)
	{
		statement_timeout_active = false;
		cancel_from_timeout = false;
	}
	else if (statement_timeout_active)
	{
		/* Re-arm the timer for the still-pending statement timeout */
		if (!CheckStatementTimeout())
			return false;
	}
	return true;
}

1427

1428
/*
1429 1430 1431
 * Check for statement timeout.  If the timeout time has come,
 * trigger a query-cancel interrupt; if not, reschedule the SIGALRM
 * interrupt to occur at the right time.
1432
 *
1433
 * Returns true if okay, false if failed to set the interrupt.
1434
 */
1435 1436
static bool
CheckStatementTimeout(void)
1437
{
1438
	TimestampTz now;
B
Bruce Momjian 已提交
1439

1440 1441 1442
	if (!statement_timeout_active)
		return true;			/* do nothing if not active */

1443
	now = GetCurrentTimestamp();
1444

1445
	if (now >= statement_fin_time)
1446
	{
1447 1448
		/* Time to die */
		statement_timeout_active = false;
1449
		cancel_from_timeout = true;
1450 1451 1452 1453
#ifdef HAVE_SETSID
		/* try to signal whole process group */
		kill(-MyProcPid, SIGINT);
#endif
1454
		kill(MyProcPid, SIGINT);
1455 1456 1457 1458
	}
	else
	{
		/* Not time yet, so (re)schedule the interrupt */
1459 1460
		long		secs;
		int			usecs;
1461 1462
		struct itimerval timeval;

1463 1464
		TimestampDifference(now, statement_fin_time,
							&secs, &usecs);
B
Bruce Momjian 已提交
1465

1466 1467 1468 1469 1470 1471
		/*
		 * It's possible that the difference is less than a microsecond;
		 * ensure we don't cancel, rather than set, the interrupt.
		 */
		if (secs == 0 && usecs == 0)
			usecs = 1;
1472
		MemSet(&timeval, 0, sizeof(struct itimerval));
1473 1474
		timeval.it_value.tv_sec = secs;
		timeval.it_value.tv_usec = usecs;
1475
		if (setitimer(ITIMER_REAL, &timeval, NULL))
1476 1477 1478
			return false;
	}

1479 1480
	return true;
}
1481 1482 1483


/*
1484 1485 1486 1487 1488 1489
 * Signal handler for SIGALRM
 *
 * Process deadlock check and/or statement timeout check, as needed.
 * To avoid various edge cases, we must be careful to do nothing
 * when there is nothing to be done.  We also need to be able to
 * reschedule the timer interrupt if called before end of statement.
1490 1491 1492 1493
 */
void
handle_sig_alarm(SIGNAL_ARGS)
{
1494 1495 1496
	int			save_errno = errno;

	if (deadlock_timeout_active)
1497
	{
1498
		deadlock_timeout_active = false;
1499 1500
		CheckDeadLock();
	}
1501 1502 1503 1504 1505

	if (statement_timeout_active)
		(void) CheckStatementTimeout();

	errno = save_errno;
1506
}