/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.178 2006/07/23 23:08:46 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * Interface (a):
 *		ProcSleep(), ProcWakeup(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with current transaction
 *
 * ProcKill -- destroys the shared memory state (and locks)
 * associated with the process.
 */
32 33
#include "postgres.h"

34
#include <signal.h>
35 36
#include <unistd.h>
#include <sys/time.h>
M
Marc G. Fournier 已提交
37

38
#include "access/transam.h"
39
#include "access/xact.h"
40
#include "miscadmin.h"
41
#include "storage/ipc.h"
42
#include "storage/proc.h"
43
#include "storage/procarray.h"
44
#include "storage/spin.h"
45

46

47
/* GUC variables */
int			DeadlockTimeout = 1000;	/* delay before running the deadlock
									 * check, used as the enable_sig_alarm()
									 * timeout in ProcSleep */
int			StatementTimeout = 0;	/* statement timeout; 0 presumably means
									 * disabled -- see CheckStatementTimeout */

/* Pointer to this process's PGPROC struct, if any */
PGPROC	   *MyProc = NULL;

/*
 * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
 * having a PGPROC and a wait semaphore!  But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;

/* Pointers to shared-memory structures */
NON_EXEC_STATIC PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *DummyProcs = NULL;

/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;

/* Mark these volatile because they can be changed by signal handler */
static volatile bool statement_timeout_active = false;
static volatile bool deadlock_timeout_active = false;
/* set by the timeout signal path to request statement cancel */
volatile bool cancel_from_timeout = false;

/* statement_fin_time is valid only if statement_timeout_active is true */
static TimestampTz statement_fin_time;


/* on_shmem_exit callbacks and timer helper (definitions below) */
static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void DummyProcKill(int code, Datum arg);
static bool CheckStatementTimeout(void);
85 86 87
/*
 * Report shared-memory space needed by InitProcGlobal.
 */
88
Size
89
ProcGlobalShmemSize(void)
90
{
91 92 93 94 95 96 97 98 99 100
	Size		size = 0;

	/* ProcGlobal */
	size = add_size(size, sizeof(PROC_HDR));
	/* DummyProcs */
	size = add_size(size, mul_size(NUM_DUMMY_PROCS, sizeof(PGPROC)));
	/* MyProcs */
	size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
	/* ProcStructLock */
	size = add_size(size, sizeof(slock_t));
101 102 103 104

	return size;
}

105 106 107 108
/*
 * Report number of semaphores needed by InitProcGlobal.
 */
int
109
ProcGlobalSemas(void)
110
{
111
	/* We need a sema per backend, plus one for each dummy process. */
112
	return MaxBackends + NUM_DUMMY_PROCS;
113 114
}

115 116
/*
 * InitProcGlobal -
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
 *
 *	  We also create all the per-process semaphores we will need to support
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
 *	  MaxBackends higher than his kernel will support, he'll find out sooner
 *	  rather than later.
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
 *
 * Note: this is NOT called by individual backends under a postmaster,
 * not even in the EXEC_BACKEND case.  The ProcGlobal and DummyProcs
 * pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(void)
{
	PGPROC	   *procs;
	int			i;
	bool		found;

	/* Create the ProcGlobal shared structure; we must be the first attacher */
	ProcGlobal = (PROC_HDR *)
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);

	/*
	 * Create the PGPROC structures for dummy (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
	 */
	DummyProcs = (PGPROC *)
		ShmemInitStruct("DummyProcs", NUM_DUMMY_PROCS * sizeof(PGPROC),
						&found);
	Assert(!found);

	/*
	 * Initialize the data structures.
	 */
	ProcGlobal->freeProcs = INVALID_OFFSET;

	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;

	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 * Each PGPROC is pushed onto the freeProcs list (LIFO order).
	 */
	procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	MemSet(procs, 0, MaxBackends * sizeof(PGPROC));
	for (i = 0; i < MaxBackends; i++)
	{
		PGSemaphoreCreate(&(procs[i].sem));
		procs[i].links.next = ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = MAKE_OFFSET(&procs[i]);
	}

	/* Dummy procs get a semaphore too, but stay out of the freelist */
	MemSet(DummyProcs, 0, NUM_DUMMY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_DUMMY_PROCS; i++)
	{
		DummyProcs[i].pid = 0;		/* marks dummy proc as not in use */
		PGSemaphoreCreate(&(DummyProcs[i].sem));
	}

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
}

195
/*
 * InitProcess -- initialize a per-process data structure for this backend
 *
 * Pops a PGPROC off the shared freelist, fills in its fields, and arranges
 * for ProcKill to return it at backend exit.  Raises FATAL if no PGPROC is
 * available (too many backends).
 */
void
InitProcess(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	SHMEM_OFFSET myOffset;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Try to get a proc struct from the free list.  If this fails, we must be
	 * out of PGPROC structures (not to mention semaphores).
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(procglobal->spins_per_delay);

	myOffset = procglobal->freeProcs;

	if (myOffset != INVALID_OFFSET)
	{
		/* Pop the head of the freelist */
		MyProc = (PGPROC *) MAKE_PTR(myOffset);
		procglobal->freeProcs = MyProc->links.next;
		SpinLockRelease(ProcStructLock);
	}
	else
	{
		/*
		 * If we reach here, all the PGPROCs are in use.  This is one of the
		 * possible places to detect "too many backends", so give the standard
		 * error message.
		 */
		SpinLockRelease(ProcStructLock);
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("sorry, too many clients already")));
	}

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->pid = MyProcPid;
	/* databaseId and roleId will be filled in later */
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/*
	 * Arrange to clean up at backend exit.
	 */
	on_shmem_exit(ProcKill, 0);

	/*
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
	 */
	InitDeadLockChecking();
}

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317
/*
 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
 *
 * This is separate from InitProcess because we can't acquire LWLocks until
 * we've created a PGPROC, but in the EXEC_BACKEND case there is a good deal
 * of stuff to be done before this step that will require LWLock access.
 */
void
InitProcessPhase2(void)
{
	Assert(MyProc != NULL);

	/*
	 * We should now know what database we're in, so advertise that.  (We
	 * need not do any locking here, since no other backend can yet see
	 * our PGPROC.)
	 */
	Assert(OidIsValid(MyDatabaseId));
	MyProc->databaseId = MyDatabaseId;

	/*
	 * Add our PGPROC to the PGPROC array in shared memory.
	 */
	ProcArrayAdd(MyProc);

	/*
	 * Arrange to clean that up at backend exit.
	 */
	on_shmem_exit(RemoveProcFromArray, 0);
}

318 319 320
/*
 * InitDummyProcess -- create a dummy per-process data structure
 *
 * This is called by bgwriter and similar processes so that they will have a
 * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
 * and sema that are assigned are one of the extra ones created during
 * InitProcGlobal.
 *
 * Dummy processes are presently not expected to wait for real (lockmgr)
 * locks, so we need not set up the deadlock checker.  They are never added
 * to the ProcArray or the sinval messaging mechanism, either.
 */
void
InitDummyProcess(void)
{
	PGPROC	   *dummyproc;
	int			proctype;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (ProcGlobal == NULL || DummyProcs == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * We use the ProcStructLock to protect assignment and releasing of
	 * DummyProcs entries.
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(ProcGlobal->spins_per_delay);

	/*
	 * Find a free dummyproc ... *big* trouble if there isn't one ...
	 * (pid == 0 marks an entry as free; see InitProcGlobal/DummyProcKill)
	 */
	for (proctype = 0; proctype < NUM_DUMMY_PROCS; proctype++)
	{
		dummyproc = &DummyProcs[proctype];
		if (dummyproc->pid == 0)
			break;
	}
	if (proctype >= NUM_DUMMY_PROCS)
	{
		SpinLockRelease(ProcStructLock);
		elog(FATAL, "all DummyProcs are in use");
	}

	/* Mark dummy proc as in use by me */
	/* use volatile pointer to prevent code rearrangement */
	((volatile PGPROC *) dummyproc)->pid = MyProcPid;

	MyProc = dummyproc;

	SpinLockRelease(ProcStructLock);

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/*
	 * Arrange to clean up at process exit.
	 */
	on_shmem_exit(DummyProcKill, Int32GetDatum(proctype));
}

412 413 414 415 416 417 418 419 420 421
/*
 * Check whether there are at least N free PGPROC objects.
 *
 * Note: this is designed on the assumption that N will generally be small.
 */
bool
HaveNFreeProcs(int n)
{
	SHMEM_OFFSET offset;
	PGPROC	   *proc;
B
Bruce Momjian 已提交
422

423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	SpinLockAcquire(ProcStructLock);

	offset = procglobal->freeProcs;

	while (n > 0 && offset != INVALID_OFFSET)
	{
		proc = (PGPROC *) MAKE_PTR(offset);
		offset = proc->links.next;
		n--;
	}

	SpinLockRelease(ProcStructLock);

	return (n <= 0);
}

442 443 444
/*
 * Cancel any pending wait for lock, when aborting a transaction.
 *
 * Returns true if we had been waiting for a lock, else false.
 *
 * (Normally, this would only happen if we accept a cancel/die
 * interrupt while waiting; but an ereport(ERROR) while waiting is
 * within the realm of possibility, too.)
 */
bool
LockWaitCancel(void)
{
	LWLockId	partitionLock;

	/* Nothing to do if we weren't waiting for a lock */
	if (lockAwaited == NULL)
		return false;

	/* Turn off the deadlock timer, if it's still running (see ProcSleep) */
	disable_sig_alarm(false);

	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/* A valid next-link means we are still linked into the wait queue */
	if (MyProc->links.next != INVALID_OFFSET)
	{
		/* We could not have been granted the lock yet */
		RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
	}
	else
	{
		/*
		 * Somebody kicked us off the lock queue already.  Perhaps they
		 * granted us the lock, or perhaps they detected a deadlock. If they
		 * did grant us the lock, we'd better remember it in our local lock
		 * table.
		 */
		if (MyProc->waitStatus == STATUS_OK)
			GrantAwaitedLock();
	}

	lockAwaited = NULL;

	LWLockRelease(partitionLock);

	/*
	 * We used to do PGSemaphoreReset() here to ensure that our proc's wait
	 * semaphore is reset to zero.  This prevented a leftover wakeup signal
	 * from remaining in the semaphore if someone else had granted us the
	 * lock we wanted before we were able to remove ourselves from the
	 * wait-list.  However, now that ProcSleep loops until waitStatus changes,
	 * a leftover wakeup signal isn't harmful, and it seems not worth
	 * expending cycles to get rid of a signal that most likely isn't there.
	 */

	/*
	 * Return true even if we were kicked off the lock before we were able to
	 * remove ourselves.
	 */
	return true;
}
504

505

506
/*
507
 * ProcReleaseLocks() -- release locks associated with current transaction
508
 *			at main transaction commit or abort
509 510 511 512 513 514
 *
 * At main transaction commit, we release all locks except session locks.
 * At main transaction abort, we release all locks including session locks;
 * this lets us clean up after a VACUUM FULL failure.
 *
 * At subtransaction commit, we don't release any locks (so this func is not
515
 * needed at all); we will defer the releasing to the parent transaction.
516
 * At subtransaction abort, we release all locks held by the subtransaction;
517 518
 * this is implemented by retail releasing of the locks under control of
 * the ResourceOwner mechanism.
519 520
 *
 * Note that user locks are not released in any case.
521 522
 */
void
523
ProcReleaseLocks(bool isCommit)
524
{
525 526
	if (!MyProc)
		return;
527 528 529
	/* If waiting, get off wait queue (should only be needed after error) */
	LockWaitCancel();
	/* Release locks */
530
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
531 532 533
}


534 535 536 537 538 539 540 541 542 543
/*
 * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
 *
 * Registered via on_shmem_exit by InitProcessPhase2; code/arg are the
 * standard shmem-exit callback parameters and are unused here.
 */
static void
RemoveProcFromArray(int code, Datum arg)
{
	Assert(MyProc != NULL);
	ProcArrayRemove(MyProc);
}

544 545
/*
 * ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held LW locks.
 *
 * Registered via on_shmem_exit by InitProcess; code/arg are the standard
 * shmem-exit callback parameters and are unused here.
 */
static void
ProcKill(int code, Datum arg)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	Assert(MyProc != NULL);

	/*
	 * Release any LW locks I am holding.  There really shouldn't be any, but
	 * it's cheap to check again before we cut the knees off the LWLock
	 * facility by releasing our PGPROC ...
	 */
	LWLockReleaseAll();

	SpinLockAcquire(ProcStructLock);

	/* Return PGPROC structure (and semaphore) to freelist */
	MyProc->links.next = procglobal->freeProcs;
	procglobal->freeProcs = MAKE_OFFSET(MyProc);

	/* PGPROC struct isn't mine anymore */
	MyProc = NULL;

	/* Update shared estimate of spins_per_delay */
	procglobal->spins_per_delay = update_spins_per_delay(procglobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);
}

/*
 * DummyProcKill() -- Cut-down version of ProcKill for dummy (bgwriter)
 *		processes.	The PGPROC and sema are not released, only marked
 *		as not-in-use.
 *
 * Registered via on_shmem_exit by InitDummyProcess; arg carries the
 * DummyProcs index assigned to this process.
 */
static void
DummyProcKill(int code, Datum arg)
{
	int			proctype = DatumGetInt32(arg);
	PGPROC	   *dummyproc;

	Assert(proctype >= 0 && proctype < NUM_DUMMY_PROCS);

	dummyproc = &DummyProcs[proctype];

	Assert(MyProc == dummyproc);

	/* Release any LW locks I am holding (see notes above) */
	LWLockReleaseAll();

	SpinLockAcquire(ProcStructLock);

	/* Mark dummy proc no longer in use (pid == 0 means free) */
	MyProc->pid = 0;

	/* PGPROC struct isn't mine anymore */
	MyProc = NULL;

	/* Update shared estimate of spins_per_delay */
	ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);
}

612

613 614
/*
 * ProcQueue package: routines for putting processes to sleep
615
 *		and  waking them up
616 617 618 619 620 621 622 623
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
 * Returns: a pointer to the queue or NULL
 * Side Effects: Initializes the queue if we allocated one
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(char *name)
{
	bool		found;
	PROC_QUEUE *queue;

	queue = (PROC_QUEUE *) ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
	if (queue == NULL)
		return NULL;

	/* first attacher is responsible for initialization */
	if (!found)
		ProcQueueInit(queue);

	return queue;
}
#endif
639 640 641 642 643

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
644
ProcQueueInit(PROC_QUEUE *queue)
645
{
646 647
	SHMQueueInit(&(queue->links));
	queue->size = 0;
648 649 650 651
}


/*
 * ProcSleep -- put a process to sleep on the specified lock
 *
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process (under all XIDs).
 *
 * The lock table's partition lock must be held at entry, and will be held
 * at exit.
 *
 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
 *
 * ASSUME: that no one will fiddle with the queue until after
 *		we release the partition lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is normally zero, so when we try to acquire it, we sleep.
 */
int
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
	LOCKMODE	lockmode = locallock->tag.mode;
	LOCK	   *lock = locallock->lock;
	PROCLOCK   *proclock = locallock->proclock;
	uint32		hashcode = locallock->hashcode;
	LWLockId	partitionLock = LockHashPartitionLock(hashcode);
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
	LOCKMASK	myHeldLocks = MyProc->heldLocks;
	bool		early_deadlock = false;
	PGPROC	   *proc;
	int			i;

	/*
	 * Determine where to add myself in the wait queue.
	 *
	 * Normally I should go at the end of the queue.  However, if I already
	 * hold locks that conflict with the request of any previous waiter, put
	 * myself in the queue just in front of the first such waiter. This is not
	 * a necessary step, since deadlock detection would move me to before that
	 * waiter anyway; but it's relatively cheap to detect such a conflict
	 * immediately, and avoid delaying till deadlock timeout.
	 *
	 * Special case: if I find I should go in front of some waiter, check to
	 * see if I conflict with already-held locks or the requests before that
	 * waiter.	If not, then just grant myself the requested lock immediately.
	 * This is the same as the test for immediate grant in LockAcquire, except
	 * we are only considering the part of the wait queue before my insertion
	 * point.
	 */
	if (myHeldLocks != 0)
	{
		LOCKMASK	aheadRequests = 0;

		proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);
		for (i = 0; i < waitQueue->size; i++)
		{
			/* Must he wait for me? */
			if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
			{
				/* Must I wait for him ? */
				if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
				{
					/*
					 * Yes, so we have a deadlock.	Easiest way to clean up
					 * correctly is to call RemoveFromWaitQueue(), but we
					 * can't do that until we are *on* the wait queue. So, set
					 * a flag to check below, and break out of loop.  Also,
					 * record deadlock info for later message.
					 */
					RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
					early_deadlock = true;
					break;
				}
				/* I must go before this waiter.  Check special case. */
				if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
					LockCheckConflicts(lockMethodTable,
									   lockmode,
									   lock,
									   proclock,
									   MyProc) == STATUS_OK)
				{
					/* Skip the wait and just grant myself the lock. */
					GrantLock(lock, proclock, lockmode);
					GrantAwaitedLock();
					return STATUS_OK;
				}
				/* Break out of loop to put myself before him */
				break;
			}
			/* Nope, so advance to next waiter */
			aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
			proc = (PGPROC *) MAKE_PTR(proc->links.next);
		}

		/*
		 * If we fall out of loop normally, proc points to waitQueue head, so
		 * we will insert at tail of queue as desired.
		 */
	}
	else
	{
		/* I hold no locks, so I can't push in front of anyone. */
		proc = (PGPROC *) &(waitQueue->links);
	}

	/*
	 * Insert self into queue, ahead of the given proc (or at tail of queue).
	 */
	SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
	waitQueue->size++;

	lock->waitMask |= LOCKBIT_ON(lockmode);

	/* Set up wait information in PGPROC object, too */
	MyProc->waitLock = lock;
	MyProc->waitProcLock = proclock;
	MyProc->waitLockMode = lockmode;

	MyProc->waitStatus = STATUS_WAITING;

	/*
	 * If we detected deadlock, give up without waiting.  This must agree with
	 * CheckDeadLock's recovery code, except that we shouldn't release the
	 * semaphore since we haven't tried to lock it yet.
	 */
	if (early_deadlock)
	{
		RemoveFromWaitQueue(MyProc, hashcode);
		return STATUS_ERROR;
	}

	/* mark that we are waiting for a lock */
	lockAwaited = locallock;

	/*
	 * Release the lock table's partition lock.
	 *
	 * NOTE: this may also cause us to exit critical-section state, possibly
	 * allowing a cancel/die interrupt to be accepted. This is OK because we
	 * have recorded the fact that we are waiting for a lock, and so
	 * LockWaitCancel will clean up if cancel/die happens.
	 */
	LWLockRelease(partitionLock);

	/*
	 * Set timer so we can wake up after awhile and check for a deadlock. If a
	 * deadlock is detected, the handler releases the process's semaphore and
	 * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
	 * must report failure rather than success.
	 *
	 * By delaying the check until we've waited for a bit, we can avoid
	 * running the rather expensive deadlock-check code in most cases.
	 */
	if (!enable_sig_alarm(DeadlockTimeout, false))
		elog(FATAL, "could not set timer for process wakeup");

	/*
	 * If someone wakes us between LWLockRelease and PGSemaphoreLock,
	 * PGSemaphoreLock will not block.	The wakeup is "saved" by the semaphore
	 * implementation.  While this is normally good, there are cases where
	 * a saved wakeup might be leftover from a previous operation (for
	 * example, we aborted ProcWaitForSignal just before someone did
	 * ProcSendSignal).  So, loop to wait again if the waitStatus shows
	 * we haven't been granted nor denied the lock yet.
	 *
	 * We pass interruptOK = true, which eliminates a window in which
	 * cancel/die interrupts would be held off undesirably.  This is a promise
	 * that we don't mind losing control to a cancel/die interrupt here.  We
	 * don't, because we have no shared-state-change work to do after being
	 * granted the lock (the grantor did it all).  We do have to worry about
	 * updating the locallock table, but if we lose control to an error,
	 * LockWaitCancel will fix that up.
	 */
	do {
		PGSemaphoreLock(&MyProc->sem, true);
	} while (MyProc->waitStatus == STATUS_WAITING);

	/*
	 * Disable the timer, if it's still running
	 */
	if (!disable_sig_alarm(false))
		elog(FATAL, "could not disable timer for process wakeup");

	/*
	 * Re-acquire the lock table's partition lock.  We have to do this to
	 * hold off cancel/die interrupts before we can mess with lockAwaited
	 * (else we might have a missed or duplicated locallock update).
	 */
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * We no longer want LockWaitCancel to do anything.
	 */
	lockAwaited = NULL;

	/*
	 * If we got the lock, be sure to remember it in the locallock table.
	 */
	if (MyProc->waitStatus == STATUS_OK)
		GrantAwaitedLock();

	/*
	 * We don't have to do anything else, because the awaker did all the
	 * necessary update of the lock table and MyProc.
	 */
	return MyProc->waitStatus;
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
 *	 Also remove the process from the wait queue and set its links invalid.
 *	 RETURN: the next process in the wait queue.
 *
 * The appropriate lock partition lock must be held by caller.
 *
 * XXX: presently, this code is only used for the "success" case, and only
 * works correctly for that case.  To clean up in failure case, would need
 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
 * Hence, in practice the waitStatus parameter must be STATUS_OK.
 */
PGPROC *
ProcWakeup(PGPROC *proc, int waitStatus)
{
	PGPROC	   *retProc;

	/* Proc should be sleeping ... */
	if (proc->links.prev == INVALID_OFFSET ||
		proc->links.next == INVALID_OFFSET)
		return NULL;
	Assert(proc->waitStatus == STATUS_WAITING);

	/* Save next process before we zap the list link */
	retProc = (PGPROC *) MAKE_PTR(proc->links.next);

	/* Remove process from wait queue */
	SHMQueueDelete(&(proc->links));
	(proc->waitLock->waitProcs.size)--;

	/* Clean up process' state and pass it the ok/fail signal */
	proc->waitLock = NULL;
	proc->waitProcLock = NULL;
	proc->waitStatus = waitStatus;

	/* And awaken it */
	PGSemaphoreUnlock(&proc->sem);

	return retProc;
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
 *		released (or a prior waiter is aborted).  Scan all waiters
 *		for lock, waken any that are no longer blocked.
 *
 * The appropriate lock partition lock must be held by caller.
 */
void
ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
{
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
	int			queue_size = waitQueue->size;
	PGPROC	   *proc;
	LOCKMASK	aheadRequests = 0;	/* modes requested by waiters we skipped */

	Assert(queue_size >= 0);

	if (queue_size == 0)
		return;

	proc = (PGPROC *) MAKE_PTR(waitQueue->links.next);

	while (queue_size-- > 0)
	{
		LOCKMODE	lockmode = proc->waitLockMode;

		/*
		 * Waken if (a) doesn't conflict with requests of earlier waiters, and
		 * (b) doesn't conflict with already-held locks.
		 */
		if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
			LockCheckConflicts(lockMethodTable,
							   lockmode,
							   lock,
							   proc->waitProcLock,
							   proc) == STATUS_OK)
		{
			/* OK to waken */
			GrantLock(lock, proc->waitProcLock, lockmode);
			proc = ProcWakeup(proc, STATUS_OK);

			/*
			 * ProcWakeup removes proc from the lock's waiting process queue
			 * and returns the next proc in chain; don't use proc's next-link,
			 * because it's been cleared.
			 */
		}
		else
		{
			/*
			 * Cannot wake this guy. Remember his request for later checks.
			 */
			aheadRequests |= LOCKBIT_ON(lockmode);
			proc = (PGPROC *) MAKE_PTR(proc->links.next);
		}
	}

	Assert(waitQueue->size >= 0);
}

963 964 965
/*
 * CheckDeadLock
 *
966
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
967 968 969 970
 * while waiting for a lock to be released by some other process.  Look
 * to see if there's a deadlock; if not, just return and continue waiting.
 * If we have a real deadlock, remove ourselves from the lock's wait queue
 * and signal an error to ProcSleep.
971
 */
972
static void
973
CheckDeadLock(void)
974
{
975 976
	int			i;

977
	/*
978 979 980 981 982 983 984 985
	 * Acquire exclusive lock on the entire shared lock data structures.
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 *
	 * Note that the deadlock check interrupt had better not be enabled
	 * anywhere that this process itself holds lock partition locks, else this
	 * will wait forever.  Also note that LWLockAcquire creates a critical
	 * section, so that this routine cannot be interrupted by cancel/die
	 * interrupts.
986
	 */
987 988
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE);
989

990
	/*
991 992
	 * Check to see if we've been awoken by anyone in the interim.
	 *
993 994 995
	 * If we have we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to us so
	 * we know that we don't have to wait anymore.
996
	 *
997
	 * We check by looking to see if we've been unlinked from the wait queue.
B
Bruce Momjian 已提交
998
	 * This is quicker than checking our semaphore's state, since no kernel
999
	 * call is needed, and it is safe because we hold the lock partition lock.
1000 1001 1002
	 */
	if (MyProc->links.prev == INVALID_OFFSET ||
		MyProc->links.next == INVALID_OFFSET)
1003
		goto check_done;
1004

1005
#ifdef LOCK_DEBUG
B
Bruce Momjian 已提交
1006 1007
	if (Debug_deadlocks)
		DumpAllLocks();
1008 1009
#endif

1010
	if (!DeadLockCheck(MyProc))
B
Bruce Momjian 已提交
1011
	{
1012
		/* No deadlock, so keep waiting */
1013
		goto check_done;
B
Bruce Momjian 已提交
1014 1015
	}

1016
	/*
1017 1018
	 * Oops.  We have a deadlock.
	 *
1019 1020 1021 1022
	 * Get this process out of wait state.  (Note: we could do this more
	 * efficiently by relying on lockAwaited, but use this coding to preserve
	 * the flexibility to kill some other transaction than the one detecting
	 * the deadlock.)
1023 1024 1025
	 *
	 * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
	 * ProcSleep will report an error after we return from the signal handler.
1026
	 */
1027
	Assert(MyProc->waitLock != NULL);
1028
	RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1029

1030 1031 1032
	/*
	 * Unlock my semaphore so that the interrupted ProcSleep() call can
	 * finish.
1033
	 */
1034
	PGSemaphoreUnlock(&MyProc->sem);
1035

1036
	/*
B
Bruce Momjian 已提交
1037 1038 1039 1040 1041 1042 1043 1044
	 * We're done here.  Transaction abort caused by the error that ProcSleep
	 * will raise will cause any other locks we hold to be released, thus
	 * allowing other processes to wake up; we don't need to do that here.
	 * NOTE: an exception is that releasing locks we hold doesn't consider the
	 * possibility of waiters that were blocked behind us on the lock we just
	 * failed to get, and might now be wakable because we're not in front of
	 * them anymore.  However, RemoveFromWaitQueue took care of waking up any
	 * such processes.
1045
	 */
1046 1047 1048 1049 1050 1051 1052 1053 1054

	/*
	 * Release locks acquired at head of routine.  Order is not critical,
	 * so do it back-to-front to avoid waking another CheckDeadLock instance
	 * before it can get all the locks.
	 */
check_done:
	for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
		LWLockRelease(FirstLockMgrLock + i);
1055 1056 1057
}


1058 1059 1060 1061 1062 1063
/*
 * ProcWaitForSignal - wait for a signal from another backend.
 *
 * This can share the semaphore normally used for waiting for locks,
 * since a backend could never be waiting for a lock and a signal at
 * the same time.  As with locks, it's OK if the signal arrives just
1064 1065 1066 1067
 * before we actually reach the waiting state.  Also as with locks,
 * it's necessary that the caller be robust against bogus wakeups:
 * always check that the desired state has occurred, and wait again
 * if not.  This copes with possible "leftover" wakeups.
1068 1069 1070 1071
 */
void
ProcWaitForSignal(void)
{
1072
	PGSemaphoreLock(&MyProc->sem, true);
1073 1074 1075
}

/*
1076
 * ProcSendSignal - send a signal to a backend identified by PID
1077 1078
 */
void
1079
ProcSendSignal(int pid)
1080
{
1081
	PGPROC	   *proc = BackendPidGetProc(pid);
1082 1083

	if (proc != NULL)
1084
		PGSemaphoreUnlock(&proc->sem);
1085 1086 1087
}


1088 1089 1090 1091 1092 1093 1094 1095 1096
/*****************************************************************************
 * SIGALRM interrupt support
 *
 * Maybe these should be in pqsignal.c?
 *****************************************************************************/

/*
 * Enable the SIGALRM interrupt to fire after the specified delay
 *
1097
 * Delay is given in milliseconds.	Caller should be sure a SIGALRM
1098 1099
 * signal handler is installed before this is called.
 *
1100 1101
 * This code properly handles nesting of deadlock timeout alarms within
 * statement timeout alarms.
1102
 *
1103 1104 1105
 * Returns TRUE if okay, FALSE on failure.
 */
bool
1106
enable_sig_alarm(int delayms, bool is_statement_timeout)
1107
{
1108
	TimestampTz fin_time;
1109
	struct itimerval timeval;
1110

1111 1112
	if (is_statement_timeout)
	{
1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127
		/*
		 * Begin statement-level timeout
		 *
		 * Note that we compute statement_fin_time with reference to the
		 * statement_timestamp, but apply the specified delay without any
		 * correction; that is, we ignore whatever time has elapsed since
		 * statement_timestamp was set.  In the normal case only a small
		 * interval will have elapsed and so this doesn't matter, but there
		 * are corner cases (involving multi-statement query strings with
		 * embedded COMMIT or ROLLBACK) where we might re-initialize the
		 * statement timeout long after initial receipt of the message.
		 * In such cases the enforcement of the statement timeout will be
		 * a bit inconsistent.  This annoyance is judged not worth the cost
		 * of performing an additional gettimeofday() here.
		 */
1128
		Assert(!deadlock_timeout_active);
1129 1130
		fin_time = GetCurrentStatementStartTimestamp();
		fin_time = TimestampTzPlusMilliseconds(fin_time, delayms);
1131
		statement_fin_time = fin_time;
1132
		cancel_from_timeout = false;
1133
		statement_timeout_active = true;
1134 1135 1136 1137 1138 1139
	}
	else if (statement_timeout_active)
	{
		/*
		 * Begin deadlock timeout with statement-level timeout active
		 *
1140 1141 1142 1143
		 * Here, we want to interrupt at the closer of the two timeout times.
		 * If fin_time >= statement_fin_time then we need not touch the
		 * existing timer setting; else set up to interrupt at the deadlock
		 * timeout time.
1144 1145 1146
		 *
		 * NOTE: in this case it is possible that this routine will be
		 * interrupted by the previously-set timer alarm.  This is okay
B
Bruce Momjian 已提交
1147 1148 1149
		 * because the signal handler will do only what it should do according
		 * to the state variables.	The deadlock checker may get run earlier
		 * than normal, but that does no harm.
1150
		 */
1151 1152
		fin_time = GetCurrentTimestamp();
		fin_time = TimestampTzPlusMilliseconds(fin_time, delayms);
1153
		deadlock_timeout_active = true;
1154
		if (fin_time >= statement_fin_time)
1155 1156 1157 1158 1159 1160 1161
			return true;
	}
	else
	{
		/* Begin deadlock timeout with no statement-level timeout */
		deadlock_timeout_active = true;
	}
1162

1163
	/* If we reach here, okay to set the timer interrupt */
1164
	MemSet(&timeval, 0, sizeof(struct itimerval));
1165 1166
	timeval.it_value.tv_sec = delayms / 1000;
	timeval.it_value.tv_usec = (delayms % 1000) * 1000;
1167
	if (setitimer(ITIMER_REAL, &timeval, NULL))
1168
		return false;
1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
	return true;
}

/*
 * Cancel the SIGALRM timer, either for a deadlock timeout or a statement
 * timeout.  If a deadlock timeout is canceled, any active statement timeout
 * remains in force.
 *
 * Returns TRUE if okay, FALSE on failure.
 */
bool
disable_sig_alarm(bool is_statement_timeout)
{
	/*
	 * If any alarm is armed, stop the interval timer first; this avoids
	 * being interrupted by the signal handler midway through and thereby
	 * possibly getting confused.  CheckStatementTimeout re-enables the
	 * interrupt below if it is still needed.
	 */
	if (statement_timeout_active || deadlock_timeout_active)
	{
		struct itimerval timeval;

		MemSet(&timeval, 0, sizeof(struct itimerval));
		if (setitimer(ITIMER_REAL, &timeval, NULL))
		{
			/* Couldn't stop the timer: disarm all state and report failure. */
			statement_timeout_active = false;
			cancel_from_timeout = false;
			deadlock_timeout_active = false;
			return false;
		}
	}

	/* Always cancel deadlock timeout, in case this is error cleanup */
	deadlock_timeout_active = false;

	if (is_statement_timeout)
	{
		/* Cancel the statement timeout outright. */
		statement_timeout_active = false;
		cancel_from_timeout = false;
	}
	else if (statement_timeout_active)
	{
		/* Reschedule the still-pending statement timeout interrupt. */
		if (!CheckStatementTimeout())
			return false;
	}

	return true;
}

1220

1221
/*
1222 1223 1224
 * Check for statement timeout.  If the timeout time has come,
 * trigger a query-cancel interrupt; if not, reschedule the SIGALRM
 * interrupt to occur at the right time.
1225
 *
1226
 * Returns true if okay, false if failed to set the interrupt.
1227
 */
1228 1229
static bool
CheckStatementTimeout(void)
1230
{
1231
	TimestampTz now;
B
Bruce Momjian 已提交
1232

1233 1234 1235
	if (!statement_timeout_active)
		return true;			/* do nothing if not active */

1236
	now = GetCurrentTimestamp();
1237

1238
	if (now >= statement_fin_time)
1239
	{
1240 1241
		/* Time to die */
		statement_timeout_active = false;
1242
		cancel_from_timeout = true;
1243
		kill(MyProcPid, SIGINT);
1244 1245 1246 1247
	}
	else
	{
		/* Not time yet, so (re)schedule the interrupt */
1248 1249
		long		secs;
		int			usecs;
1250 1251
		struct itimerval timeval;

1252 1253 1254 1255 1256 1257 1258 1259
		TimestampDifference(now, statement_fin_time,
							&secs, &usecs);
		/*
		 * It's possible that the difference is less than a microsecond;
		 * ensure we don't cancel, rather than set, the interrupt.
		 */
		if (secs == 0 && usecs == 0)
			usecs = 1;
1260
		MemSet(&timeval, 0, sizeof(struct itimerval));
1261 1262
		timeval.it_value.tv_sec = secs;
		timeval.it_value.tv_usec = usecs;
1263
		if (setitimer(ITIMER_REAL, &timeval, NULL))
1264 1265 1266
			return false;
	}

1267 1268
	return true;
}
1269 1270 1271


/*
1272 1273 1274 1275 1276 1277
 * Signal handler for SIGALRM
 *
 * Process deadlock check and/or statement timeout check, as needed.
 * To avoid various edge cases, we must be careful to do nothing
 * when there is nothing to be done.  We also need to be able to
 * reschedule the timer interrupt if called before end of statement.
1278 1279 1280 1281
 */
void
handle_sig_alarm(SIGNAL_ARGS)
{
1282 1283 1284
	int			save_errno = errno;

	if (deadlock_timeout_active)
1285
	{
1286
		deadlock_timeout_active = false;
1287 1288
		CheckDeadLock();
	}
1289 1290 1291 1292 1293

	if (statement_timeout_active)
		(void) CheckStatementTimeout();

	errno = save_errno;
1294
}