/*-------------------------------------------------------------------------
 *
 * proc.c
 *	  routines to manage per-process shared memory data structure
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.219 2010/05/26 19:52:52 sriggs Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * Interface (a):
 *		ProcSleep(), ProcWakeup(),
 *		ProcQueueAlloc() -- create a shm queue for sleeping processes
 *		ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
 *
 * Interface (b):
 *
 * ProcReleaseLocks -- frees the locks associated with current transaction
 *
 * ProcKill -- destroys the shared memory state (and locks)
 * associated with the process.
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>
#include <sys/time.h>

#include "access/transam.h"
#include "access/xact.h"
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
#include "replication/walsender.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/procsignal.h"
#include "storage/spin.h"


/* GUC variables */
int			DeadlockTimeout = 1000;
int			StatementTimeout = 0;
bool		log_lock_waits = false;

/* Pointer to this process's PGPROC struct, if any */
PGPROC	   *MyProc = NULL;

/*
 * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
 * having a PGPROC and a wait semaphore!  But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;

/* Pointers to shared-memory structures */
NON_EXEC_STATIC PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;

/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;

/* Mark these volatile because they can be changed by signal handler */
static volatile bool standby_timeout_active = false;
static volatile bool statement_timeout_active = false;
static volatile bool deadlock_timeout_active = false;
static volatile DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
volatile bool cancel_from_timeout = false;

/* timeout_start_time is set when log_lock_waits is true */
static TimestampTz timeout_start_time;

/* statement_fin_time is valid only if statement_timeout_active is true */
static TimestampTz statement_fin_time;
static TimestampTz statement_fin_time2; /* valid only in recovery */


static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void AuxiliaryProcKill(int code, Datum arg);
static bool CheckStatementTimeout(void);
static bool CheckStandbyTimeout(void);
98 99 100
/*
 * Report shared-memory space needed by InitProcGlobal.
 */
101
Size
102
ProcGlobalShmemSize(void)
103
{
104 105 106 107
	Size		size = 0;

	/* ProcGlobal */
	size = add_size(size, sizeof(PROC_HDR));
108 109
	/* AuxiliaryProcs */
	size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
110
	/* MyProcs, including autovacuum workers and launcher */
111 112 113
	size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
	/* ProcStructLock */
	size = add_size(size, sizeof(slock_t));
114 115
	/* startupBufferPinWaitBufId */
	size = add_size(size, sizeof(NBuffers));
116 117 118 119

	return size;
}

120 121 122 123
/*
 * Report number of semaphores needed by InitProcGlobal.
 */
int
124
ProcGlobalSemas(void)
125
{
126 127 128 129
	/*
	 * We need a sema per backend (including autovacuum), plus one for each
	 * auxiliary process.
	 */
130
	return MaxBackends + NUM_AUXILIARY_PROCS;
131 132
}

133 134
/*
 * InitProcGlobal -
135 136
 *	  Initialize the global process table during postmaster or standalone
 *	  backend startup.
137
 *
138
 *	  We also create all the per-process semaphores we will need to support
139 140 141 142 143 144 145
 *	  the requested number of backends.  We used to allocate semaphores
 *	  only when backends were actually started up, but that is bad because
 *	  it lets Postgres fail under load --- a lot of Unix systems are
 *	  (mis)configured with small limits on the number of semaphores, and
 *	  running out when trying to start another backend is a common failure.
 *	  So, now we grab enough semaphores to support the desired max number
 *	  of backends immediately at initialization --- if the sysadmin has set
146 147
 *	  MaxConnections or autovacuum_max_workers higher than his kernel will
 *	  support, he'll find out sooner rather than later.
148 149 150 151
 *
 *	  Another reason for creating semaphores here is that the semaphore
 *	  implementation typically requires us to create semaphores in the
 *	  postmaster, not in backends.
152 153
 *
 * Note: this is NOT called by individual backends under a postmaster,
154
 * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
155
 * pointers must be propagated specially for EXEC_BACKEND operation.
156 157
 */
void
158
InitProcGlobal(void)
159
{
160 161 162
	PGPROC	   *procs;
	int			i;
	bool		found;
163

164
	/* Create the ProcGlobal shared structure */
165
	ProcGlobal = (PROC_HDR *)
166 167
		ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
	Assert(!found);
168

169
	/*
170 171
	 * Create the PGPROC structures for auxiliary (bgwriter) processes, too.
	 * These do not get linked into the freeProcs list.
172
	 */
173 174
	AuxiliaryProcs = (PGPROC *)
		ShmemInitStruct("AuxiliaryProcs", NUM_AUXILIARY_PROCS * sizeof(PGPROC),
175 176
						&found);
	Assert(!found);
177

178 179 180
	/*
	 * Initialize the data structures.
	 */
181 182
	ProcGlobal->freeProcs = NULL;
	ProcGlobal->autovacFreeProcs = NULL;
183

184
	ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
185

186 187 188
	/*
	 * Pre-create the PGPROC structures and create a semaphore for each.
	 */
189
	procs = (PGPROC *) ShmemAlloc((MaxConnections) * sizeof(PGPROC));
190 191 192 193
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
194 195
	MemSet(procs, 0, MaxConnections * sizeof(PGPROC));
	for (i = 0; i < MaxConnections; i++)
196 197
	{
		PGSemaphoreCreate(&(procs[i].sem));
198 199
		procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
		ProcGlobal->freeProcs = &procs[i];
200
	}
201

202 203 204 205 206 207
	/*
	 * Likewise for the PGPROCs reserved for autovacuum.
	 *
	 * Note: the "+1" here accounts for the autovac launcher
	 */
	procs = (PGPROC *) ShmemAlloc((autovacuum_max_workers + 1) * sizeof(PGPROC));
208 209 210 211
	if (!procs)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
212 213
	MemSet(procs, 0, (autovacuum_max_workers + 1) * sizeof(PGPROC));
	for (i = 0; i < autovacuum_max_workers + 1; i++)
214 215
	{
		PGSemaphoreCreate(&(procs[i].sem));
216 217
		procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
		ProcGlobal->autovacFreeProcs = &procs[i];
218 219
	}

220 221 222
	/*
	 * And auxiliary procs.
	 */
223 224
	MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
	for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
225
	{
B
Bruce Momjian 已提交
226
		AuxiliaryProcs[i].pid = 0;		/* marks auxiliary proc as not in use */
227
		PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
228
	}
229 230 231 232

	/* Create ProcStructLock spinlock, too */
	ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
233 234
}

235
/*
236
 * InitProcess -- initialize a per-process data structure for this backend
237 238
 */
void
239
InitProcess(void)
240
{
241 242
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
243
	int			i;
244 245

	/*
246 247
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
248
	 */
249
	if (procglobal == NULL)
250
		elog(PANIC, "proc header uninitialized");
251 252

	if (MyProc != NULL)
253
		elog(ERROR, "you already exist");
254

255
	/*
B
Bruce Momjian 已提交
256 257
	 * Try to get a proc struct from the free list.  If this fails, we must be
	 * out of PGPROC structures (not to mention semaphores).
258
	 *
B
Bruce Momjian 已提交
259 260
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
261
	 */
262
	SpinLockAcquire(ProcStructLock);
263

264 265
	set_spins_per_delay(procglobal->spins_per_delay);

266
	if (IsAnyAutoVacuumProcess())
267
		MyProc = procglobal->autovacFreeProcs;
268
	else
269
		MyProc = procglobal->freeProcs;
270

271
	if (MyProc != NULL)
272
	{
273
		if (IsAnyAutoVacuumProcess())
274
			procglobal->autovacFreeProcs = (PGPROC *) MyProc->links.next;
275
		else
276
			procglobal->freeProcs = (PGPROC *) MyProc->links.next;
277
		SpinLockRelease(ProcStructLock);
278 279 280 281
	}
	else
	{
		/*
B
Bruce Momjian 已提交
282 283
		 * If we reach here, all the PGPROCs are in use.  This is one of the
		 * possible places to detect "too many backends", so give the standard
284 285
		 * error message.  XXX do we need to give a different failure message
		 * in the autovacuum case?
286
		 */
287
		SpinLockRelease(ProcStructLock);
288 289 290
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("sorry, too many clients already")));
291
	}
292

293 294
	/*
	 * Now that we have a PGPROC, mark ourselves as an active postmaster
295
	 * child; this is so that the postmaster can detect it if we exit without
296 297
	 * cleaning up.  (XXX autovac launcher currently doesn't participate in
	 * this; it probably should.)
298
	 */
299
	if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
300 301 302 303 304 305
	{
		if (am_walsender)
			MarkPostmasterChildWalSender();
		else
			MarkPostmasterChildActive();
	}
306

307
	/*
B
Bruce Momjian 已提交
308 309
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
310
	 */
311
	SHMQueueElemInit(&(MyProc->links));
312
	MyProc->waitStatus = STATUS_OK;
313
	MyProc->lxid = InvalidLocalTransactionId;
314
	MyProc->xid = InvalidTransactionId;
315
	MyProc->xmin = InvalidTransactionId;
316
	MyProc->pid = MyProcPid;
317 318
	/* backendId, databaseId and roleId will be filled in later */
	MyProc->backendId = InvalidBackendId;
319
	MyProc->databaseId = InvalidOid;
320
	MyProc->roleId = InvalidOid;
321
	MyProc->inCommit = false;
322
	MyProc->vacuumFlags = 0;
323
	/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
324 325
	if (IsAutoVacuumWorkerProcess())
		MyProc->vacuumFlags |= PROC_IS_AUTOVACUUM;
326 327 328
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
329
	MyProc->waitLock = NULL;
330
	MyProc->waitProcLock = NULL;
331 332
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));
333
	MyProc->recoveryConflictPending = false;
334

335
	/*
336
	 * We might be reusing a semaphore that belonged to a failed process. So
B
Bruce Momjian 已提交
337
	 * be careful and reinitialize its value here.	(This is not strictly
338
	 * necessary anymore, but seems like a good idea for cleanliness.)
339
	 */
340
	PGSemaphoreReset(&MyProc->sem);
341

342
	/*
343
	 * Arrange to clean up at backend exit.
344
	 */
345
	on_shmem_exit(ProcKill, 0);
346 347

	/*
B
Bruce Momjian 已提交
348 349
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
350 351
	 */
	InitDeadLockChecking();
352 353
}

354 355 356 357
/*
 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
 *
 * This is separate from InitProcess because we can't acquire LWLocks until
358 359
 * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
 * work until after we've done CreateSharedMemoryAndSemaphores.
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
 */
void
InitProcessPhase2(void)
{
	Assert(MyProc != NULL);

	/*
	 * Add our PGPROC to the PGPROC array in shared memory.
	 */
	ProcArrayAdd(MyProc);

	/*
	 * Arrange to clean that up at backend exit.
	 */
	on_shmem_exit(RemoveProcFromArray, 0);
}

377
/*
378
 * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
379
 *
380 381
 * This is called by bgwriter and similar processes so that they will have a
 * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
382
 * and sema that are assigned are one of the extra ones created during
383
 * InitProcGlobal.
384
 *
385
 * Auxiliary processes are presently not expected to wait for real (lockmgr)
386
 * locks, so we need not set up the deadlock checker.  They are never added
B
Bruce Momjian 已提交
387
 * to the ProcArray or the sinval messaging mechanism, either.	They also
388 389
 * don't get a VXID assigned, since this is only useful when we actually
 * hold lockmgr locks.
390 391 392 393 394
 *
 * Startup process however uses locks but never waits for them in the
 * normal backend sense. Startup process also takes part in sinval messaging
 * as a sendOnly process, so never reads messages from sinval queue. So
 * Startup process does have a VXID and does show up in pg_locks.
395 396
 */
void
397
InitAuxiliaryProcess(void)
398
{
399
	PGPROC	   *auxproc;
400
	int			proctype;
401
	int			i;
J
Jan Wieck 已提交
402

403
	/*
404 405
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
406
	 */
407
	if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
408
		elog(PANIC, "proc header uninitialized");
409 410

	if (MyProc != NULL)
411
		elog(ERROR, "you already exist");
412

413
	/*
414
	 * We use the ProcStructLock to protect assignment and releasing of
415
	 * AuxiliaryProcs entries.
416
	 *
B
Bruce Momjian 已提交
417 418
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
419 420 421 422 423
	 */
	SpinLockAcquire(ProcStructLock);

	set_spins_per_delay(ProcGlobal->spins_per_delay);

424
	/*
425
	 * Find a free auxproc ... *big* trouble if there isn't one ...
426
	 */
427
	for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
428
	{
429 430
		auxproc = &AuxiliaryProcs[proctype];
		if (auxproc->pid == 0)
431 432
			break;
	}
433
	if (proctype >= NUM_AUXILIARY_PROCS)
434 435
	{
		SpinLockRelease(ProcStructLock);
436
		elog(FATAL, "all AuxiliaryProcs are in use");
437
	}
438

439
	/* Mark auxiliary proc as in use by me */
440
	/* use volatile pointer to prevent code rearrangement */
441
	((volatile PGPROC *) auxproc)->pid = MyProcPid;
442

443
	MyProc = auxproc;
444 445 446

	SpinLockRelease(ProcStructLock);

447
	/*
448 449
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
450 451
	 */
	SHMQueueElemInit(&(MyProc->links));
452
	MyProc->waitStatus = STATUS_OK;
453
	MyProc->lxid = InvalidLocalTransactionId;
454 455
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
456
	MyProc->backendId = InvalidBackendId;
457
	MyProc->databaseId = InvalidOid;
458
	MyProc->roleId = InvalidOid;
459
	MyProc->inCommit = false;
460
	MyProc->vacuumFlags = 0;
461 462 463 464
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
465
	MyProc->waitProcLock = NULL;
466 467
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));
468 469

	/*
B
Bruce Momjian 已提交
470
	 * We might be reusing a semaphore that belonged to a failed process. So
B
Bruce Momjian 已提交
471
	 * be careful and reinitialize its value here.	(This is not strictly
472
	 * necessary anymore, but seems like a good idea for cleanliness.)
473
	 */
474
	PGSemaphoreReset(&MyProc->sem);
475 476 477 478

	/*
	 * Arrange to clean up at process exit.
	 */
479
	on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
480 481
}

482 483 484 485 486 487 488 489 490 491 492 493 494 495
/*
 * Record the PID and PGPROC structures for the Startup process, for use in
 * ProcSendSignal().  See comments there for further explanation.
 */
void
PublishStartupProcessInformation(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	SpinLockAcquire(ProcStructLock);

	procglobal->startupProc = MyProc;
	procglobal->startupProcPid = MyProcPid;
496
	procglobal->startupBufferPinWaitBufId = 0;
497 498 499 500

	SpinLockRelease(ProcStructLock);
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522
/*
 * Used from bufgr to share the value of the buffer that Startup waits on,
 * or to reset the value to "not waiting" (-1). This allows processing
 * of recovery conflicts for buffer pins. Set is made before backends look
 * at this value, so locking not required, especially since the set is
 * an atomic integer set operation.
 */
void
SetStartupBufferPinWaitBufId(int bufid)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	procglobal->startupBufferPinWaitBufId = bufid;
}

/*
 * Used by backends when they receive a request to check for buffer pin waits.
 */
int
GetStartupBufferPinWaitBufId(void)
{
B
Bruce Momjian 已提交
523
	int			bufid;
524 525 526 527 528 529 530 531 532

	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	bufid = procglobal->startupBufferPinWaitBufId;

	return bufid;
}

533 534 535 536 537 538 539 540 541
/*
 * Check whether there are at least N free PGPROC objects.
 *
 * Note: this is designed on the assumption that N will generally be small.
 */
bool
HaveNFreeProcs(int n)
{
	PGPROC	   *proc;
B
Bruce Momjian 已提交
542

543 544 545 546 547
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

	SpinLockAcquire(ProcStructLock);

548
	proc = procglobal->freeProcs;
549

550
	while (n > 0 && proc != NULL)
551
	{
552
		proc = (PGPROC *) proc->links.next;
553 554 555 556 557 558 559 560
		n--;
	}

	SpinLockRelease(ProcStructLock);

	return (n <= 0);
}

561 562 563 564 565 566 567 568 569
bool
IsWaitingForLock(void)
{
	if (lockAwaited == NULL)
		return false;

	return true;
}

570 571 572 573
/*
 * Cancel any pending wait for lock, when aborting a transaction.
 *
 * (Normally, this would only happen if we accept a cancel/die
574
 * interrupt while waiting; but an ereport(ERROR) while waiting is
575 576
 * within the realm of possibility, too.)
 */
577
void
578 579
LockWaitCancel(void)
{
580 581
	LWLockId	partitionLock;

582
	/* Nothing to do if we weren't waiting for a lock */
583
	if (lockAwaited == NULL)
584
		return;
585

586
	/* Turn off the deadlock timer, if it's still running (see ProcSleep) */
587
	disable_sig_alarm(false);
588 589

	/* Unlink myself from the wait queue, if on it (might not be anymore!) */
590
	partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
591
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
592

593
	if (MyProc->links.next != NULL)
594 595
	{
		/* We could not have been granted the lock yet */
596
		RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
597 598 599 600 601
	}
	else
	{
		/*
		 * Somebody kicked us off the lock queue already.  Perhaps they
B
Bruce Momjian 已提交
602 603 604
		 * granted us the lock, or perhaps they detected a deadlock. If they
		 * did grant us the lock, we'd better remember it in our local lock
		 * table.
605
		 */
606 607
		if (MyProc->waitStatus == STATUS_OK)
			GrantAwaitedLock();
608 609
	}

610
	lockAwaited = NULL;
611

612
	LWLockRelease(partitionLock);
H
Hiroshi Inoue 已提交
613

614
	/*
615
	 * We used to do PGSemaphoreReset() here to ensure that our proc's wait
B
Bruce Momjian 已提交
616 617 618 619 620 621
	 * semaphore is reset to zero.	This prevented a leftover wakeup signal
	 * from remaining in the semaphore if someone else had granted us the lock
	 * we wanted before we were able to remove ourselves from the wait-list.
	 * However, now that ProcSleep loops until waitStatus changes, a leftover
	 * wakeup signal isn't harmful, and it seems not worth expending cycles to
	 * get rid of a signal that most likely isn't there.
622
	 */
H
Hiroshi Inoue 已提交
623
}
624

625

626
/*
627
 * ProcReleaseLocks() -- release locks associated with current transaction
628
 *			at main transaction commit or abort
629 630
 *
 * At main transaction commit, we release all locks except session locks.
631
 * At main transaction abort, we release all locks including session locks.
632 633
 *
 * At subtransaction commit, we don't release any locks (so this func is not
634
 * needed at all); we will defer the releasing to the parent transaction.
635
 * At subtransaction abort, we release all locks held by the subtransaction;
636 637
 * this is implemented by retail releasing of the locks under control of
 * the ResourceOwner mechanism.
638 639
 *
 * Note that user locks are not released in any case.
640 641
 */
void
642
ProcReleaseLocks(bool isCommit)
643
{
644 645
	if (!MyProc)
		return;
646 647 648
	/* If waiting, get off wait queue (should only be needed after error) */
	LockWaitCancel();
	/* Release locks */
649
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
650 651 652
}


653 654 655 656 657 658 659
/*
 * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
 */
static void
RemoveProcFromArray(int code, Datum arg)
{
	Assert(MyProc != NULL);
660
	ProcArrayRemove(MyProc, InvalidTransactionId);
661 662
}

663 664
/*
 * ProcKill() -- Destroy the per-proc data structure for
665
 *		this process. Release any of its held LW locks.
666 667
 */
static void
668
ProcKill(int code, Datum arg)
669
{
670 671 672
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;

673
	Assert(MyProc != NULL);
674

675
	/*
B
Bruce Momjian 已提交
676 677
	 * Release any LW locks I am holding.  There really shouldn't be any, but
	 * it's cheap to check again before we cut the knees off the LWLock
678
	 * facility by releasing our PGPROC ...
679
	 */
680
	LWLockReleaseAll();
681

682
	SpinLockAcquire(ProcStructLock);
683

684 685
	/* Return PGPROC structure (and semaphore) to appropriate freelist */
	if (IsAnyAutoVacuumProcess())
686
	{
687 688
		MyProc->links.next = (SHM_QUEUE *) procglobal->autovacFreeProcs;
		procglobal->autovacFreeProcs = MyProc;
689 690 691
	}
	else
	{
692 693
		MyProc->links.next = (SHM_QUEUE *) procglobal->freeProcs;
		procglobal->freeProcs = MyProc;
694
	}
695

J
Jan Wieck 已提交
696
	/* PGPROC struct isn't mine anymore */
697
	MyProc = NULL;
698

699 700 701
	/* Update shared estimate of spins_per_delay */
	procglobal->spins_per_delay = update_spins_per_delay(procglobal->spins_per_delay);

702
	SpinLockRelease(ProcStructLock);
703

704 705
	/*
	 * This process is no longer present in shared memory in any meaningful
B
Bruce Momjian 已提交
706 707
	 * way, so tell the postmaster we've cleaned up acceptably well. (XXX
	 * autovac launcher should be included here someday)
708
	 */
709
	if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
710 711
		MarkPostmasterChildInactive();

712 713
	/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
	if (AutovacuumLauncherPid != 0)
714
		kill(AutovacuumLauncherPid, SIGUSR2);
715 716 717
}

/*
718 719 720
 * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
 *		processes (bgwriter, etc).	The PGPROC and sema are not released, only
 *		marked as not-in-use.
721 722
 */
static void
723
AuxiliaryProcKill(int code, Datum arg)
724
{
B
Bruce Momjian 已提交
725
	int			proctype = DatumGetInt32(arg);
726
	PGPROC	   *auxproc;
J
Jan Wieck 已提交
727

728
	Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
J
Jan Wieck 已提交
729

730
	auxproc = &AuxiliaryProcs[proctype];
J
Jan Wieck 已提交
731

732
	Assert(MyProc == auxproc);
733

734
	/* Release any LW locks I am holding (see notes above) */
735 736
	LWLockReleaseAll();

737 738
	SpinLockAcquire(ProcStructLock);

739
	/* Mark auxiliary proc no longer in use */
740 741
	MyProc->pid = 0;

J
Jan Wieck 已提交
742
	/* PGPROC struct isn't mine anymore */
743
	MyProc = NULL;
744 745 746 747 748

	/* Update shared estimate of spins_per_delay */
	ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);

	SpinLockRelease(ProcStructLock);
749 750
}

751

752 753
/*
 * ProcQueue package: routines for putting processes to sleep
754
 *		and  waking them up
755 756 757 758 759
 */

/*
 * ProcQueueAlloc -- alloc/attach to a shared memory process queue
 *
760 761
 * Returns: a pointer to the queue
 * Side Effects: Initializes the queue if it wasn't there before
762
 */
763
#ifdef NOT_USED
764
PROC_QUEUE *
765
ProcQueueAlloc(const char *name)
766
{
767
	PROC_QUEUE *queue;
768
	bool		found;
769

770 771 772
	queue = (PROC_QUEUE *)
		ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);

773 774
	if (!found)
		ProcQueueInit(queue);
775

776
	return queue;
777
}
778
#endif
779 780 781 782 783

/*
 * ProcQueueInit -- initialize a shared memory process queue
 */
void
784
ProcQueueInit(PROC_QUEUE *queue)
785
{
786 787
	SHMQueueInit(&(queue->links));
	queue->size = 0;
788 789 790 791
}


/*
792
 * ProcSleep -- put a process to sleep on the specified lock
793
 *
794 795
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process (under all XIDs).
796
 *
797
 * The lock table's partition lock must be held at entry, and will be held
798
 * at exit.
799
 *
800
 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
801
 *
802
 * ASSUME: that no one will fiddle with the queue until after
803
 *		we release the partition lock.
804 805
 *
 * NOTES: The process queue is now a priority queue for locking.
806 807 808
 *
 * P() on the semaphore should put us to sleep.  The process
 * semaphore is normally zero, so when we try to acquire it, we sleep.
809 810
 */
int
811
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
812
{
813 814 815
	LOCKMODE	lockmode = locallock->tag.mode;
	LOCK	   *lock = locallock->lock;
	PROCLOCK   *proclock = locallock->proclock;
816 817
	uint32		hashcode = locallock->hashcode;
	LWLockId	partitionLock = LockHashPartitionLock(hashcode);
818
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
819
	LOCKMASK	myHeldLocks = MyProc->heldLocks;
820
	bool		early_deadlock = false;
B
Bruce Momjian 已提交
821
	bool		allow_autovacuum_cancel = true;
822
	int			myWaitStatus;
J
Jan Wieck 已提交
823
	PGPROC	   *proc;
824
	int			i;
825

826
	/*
827 828
	 * Determine where to add myself in the wait queue.
	 *
829 830 831 832
	 * Normally I should go at the end of the queue.  However, if I already
	 * hold locks that conflict with the request of any previous waiter, put
	 * myself in the queue just in front of the first such waiter. This is not
	 * a necessary step, since deadlock detection would move me to before that
B
Bruce Momjian 已提交
833 834
	 * waiter anyway; but it's relatively cheap to detect such a conflict
	 * immediately, and avoid delaying till deadlock timeout.
835
	 *
836 837
	 * Special case: if I find I should go in front of some waiter, check to
	 * see if I conflict with already-held locks or the requests before that
B
Bruce Momjian 已提交
838 839 840 841
	 * waiter.	If not, then just grant myself the requested lock immediately.
	 * This is the same as the test for immediate grant in LockAcquire, except
	 * we are only considering the part of the wait queue before my insertion
	 * point.
842 843
	 */
	if (myHeldLocks != 0)
V
Vadim B. Mikheev 已提交
844
	{
845
		LOCKMASK	aheadRequests = 0;
846

847
		proc = (PGPROC *) waitQueue->links.next;
848
		for (i = 0; i < waitQueue->size; i++)
V
Vadim B. Mikheev 已提交
849
		{
850
			/* Must he wait for me? */
B
Bruce Momjian 已提交
851
			if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
V
Vadim B. Mikheev 已提交
852
			{
853
				/* Must I wait for him ? */
B
Bruce Momjian 已提交
854
				if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
855
				{
856
					/*
B
Bruce Momjian 已提交
857 858 859 860 861
					 * Yes, so we have a deadlock.	Easiest way to clean up
					 * correctly is to call RemoveFromWaitQueue(), but we
					 * can't do that until we are *on* the wait queue. So, set
					 * a flag to check below, and break out of loop.  Also,
					 * record deadlock info for later message.
862
					 */
863
					RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
864 865
					early_deadlock = true;
					break;
866
				}
867
				/* I must go before this waiter.  Check special case. */
B
Bruce Momjian 已提交
868
				if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
869 870 871
					LockCheckConflicts(lockMethodTable,
									   lockmode,
									   lock,
872
									   proclock,
873
									   MyProc) == STATUS_OK)
874
				{
875
					/* Skip the wait and just grant myself the lock. */
876
					GrantLock(lock, proclock, lockmode);
877
					GrantAwaitedLock();
878
					return STATUS_OK;
879 880
				}
				/* Break out of loop to put myself before him */
V
Vadim B. Mikheev 已提交
881
				break;
882
			}
883
			/* Nope, so advance to next waiter */
884
			aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
885
			proc = (PGPROC *) proc->links.next;
V
Vadim B. Mikheev 已提交
886
		}
B
Bruce Momjian 已提交
887

888
		/*
B
Bruce Momjian 已提交
889 890
		 * If we fall out of loop normally, proc points to waitQueue head, so
		 * we will insert at tail of queue as desired.
891
		 */
892 893 894 895
	}
	else
	{
		/* I hold no locks, so I can't push in front of anyone. */
J
Jan Wieck 已提交
896
		proc = (PGPROC *) &(waitQueue->links);
V
Vadim B. Mikheev 已提交
897
	}
898

899
	/*
B
Bruce Momjian 已提交
900
	 * Insert self into queue, ahead of the given proc (or at tail of queue).
901
	 */
902
	SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
B
Bruce Momjian 已提交
903
	waitQueue->size++;
904

905
	lock->waitMask |= LOCKBIT_ON(lockmode);
906

J
Jan Wieck 已提交
907
	/* Set up wait information in PGPROC object, too */
908
	MyProc->waitLock = lock;
909
	MyProc->waitProcLock = proclock;
910 911
	MyProc->waitLockMode = lockmode;

912
	MyProc->waitStatus = STATUS_WAITING;
913 914

	/*
B
Bruce Momjian 已提交
915 916 917
	 * If we detected deadlock, give up without waiting.  This must agree with
	 * CheckDeadLock's recovery code, except that we shouldn't release the
	 * semaphore since we haven't tried to lock it yet.
918 919 920
	 */
	if (early_deadlock)
	{
921
		RemoveFromWaitQueue(MyProc, hashcode);
922 923
		return STATUS_ERROR;
	}
924

925
	/* mark that we are waiting for a lock */
926
	lockAwaited = locallock;
927

928
	/*
929
	 * Release the lock table's partition lock.
930
	 *
931
	 * NOTE: this may also cause us to exit critical-section state, possibly
B
Bruce Momjian 已提交
932 933
	 * allowing a cancel/die interrupt to be accepted. This is OK because we
	 * have recorded the fact that we are waiting for a lock, and so
934
	 * LockWaitCancel will clean up if cancel/die happens.
935
	 */
936
	LWLockRelease(partitionLock);
937

938 939 940
	/* Reset deadlock_state before enabling the signal handler */
	deadlock_state = DS_NOT_YET_CHECKED;

941
	/*
B
Bruce Momjian 已提交
942 943 944 945
	 * Set timer so we can wake up after awhile and check for a deadlock. If a
	 * deadlock is detected, the handler releases the process's semaphore and
	 * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
	 * must report failure rather than success.
946
	 *
947 948
	 * By delaying the check until we've waited for a bit, we can avoid
	 * running the rather expensive deadlock-check code in most cases.
949
	 */
950
	if (!enable_sig_alarm(DeadlockTimeout, false))
951
		elog(FATAL, "could not set timer for process wakeup");
952

953
	/*
954
	 * If someone wakes us between LWLockRelease and PGSemaphoreLock,
B
Bruce Momjian 已提交
955
	 * PGSemaphoreLock will not block.	The wakeup is "saved" by the semaphore
B
Bruce Momjian 已提交
956 957 958 959 960
	 * implementation.	While this is normally good, there are cases where a
	 * saved wakeup might be leftover from a previous operation (for example,
	 * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
	 * So, loop to wait again if the waitStatus shows we haven't been granted
	 * nor denied the lock yet.
961
	 *
962 963 964 965 966 967 968
	 * We pass interruptOK = true, which eliminates a window in which
	 * cancel/die interrupts would be held off undesirably.  This is a promise
	 * that we don't mind losing control to a cancel/die interrupt here.  We
	 * don't, because we have no shared-state-change work to do after being
	 * granted the lock (the grantor did it all).  We do have to worry about
	 * updating the locallock table, but if we lose control to an error,
	 * LockWaitCancel will fix that up.
969
	 */
B
Bruce Momjian 已提交
970 971
	do
	{
972
		PGSemaphoreLock(&MyProc->sem, true);
973

974 975
		/*
		 * waitStatus could change from STATUS_WAITING to something else
B
Bruce Momjian 已提交
976
		 * asynchronously.	Read it just once per loop to prevent surprising
977 978 979 980
		 * behavior (such as missing log messages).
		 */
		myWaitStatus = MyProc->waitStatus;

981 982
		/*
		 * If we are not deadlocked, but are waiting on an autovacuum-induced
B
Bruce Momjian 已提交
983
		 * task, send a signal to interrupt it.
984 985 986
		 */
		if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
		{
B
Bruce Momjian 已提交
987
			PGPROC	   *autovac = GetBlockingAutoVacuumPgproc();
988 989 990 991 992 993 994 995 996 997 998

			LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

			/*
			 * Only do it if the worker is not working to protect against Xid
			 * wraparound.
			 */
			if ((autovac != NULL) &&
				(autovac->vacuumFlags & PROC_IS_AUTOVACUUM) &&
				!(autovac->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
			{
B
Bruce Momjian 已提交
999
				int			pid = autovac->pid;
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022

				elog(DEBUG2, "sending cancel to blocking autovacuum pid = %d",
					 pid);

				/* don't hold the lock across the kill() syscall */
				LWLockRelease(ProcArrayLock);

				/* send the autovacuum worker Back to Old Kent Road */
				if (kill(pid, SIGINT) < 0)
				{
					/* Just a warning to allow multiple callers */
					ereport(WARNING,
							(errmsg("could not send signal to process %d: %m",
									pid)));
				}
			}
			else
				LWLockRelease(ProcArrayLock);

			/* prevent signal from being resent more than once */
			allow_autovacuum_cancel = false;
		}

1023 1024 1025 1026
		/*
		 * If awoken after the deadlock check interrupt has run, and
		 * log_lock_waits is on, then report about the wait.
		 */
1027
		if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1028
		{
1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046
			StringInfoData buf;
			const char *modename;
			long		secs;
			int			usecs;
			long		msecs;

			initStringInfo(&buf);
			DescribeLockTag(&buf, &locallock->tag.lock);
			modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
									   lockmode);
			TimestampDifference(timeout_start_time, GetCurrentTimestamp(),
								&secs, &usecs);
			msecs = secs * 1000 + usecs / 1000;
			usecs = usecs % 1000;

			if (deadlock_state == DS_SOFT_DEADLOCK)
				ereport(LOG,
						(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
B
Bruce Momjian 已提交
1047
							  MyProcPid, modename, buf.data, msecs, usecs)));
1048
			else if (deadlock_state == DS_HARD_DEADLOCK)
1049
			{
1050
				/*
B
Bruce Momjian 已提交
1051 1052 1053 1054
				 * This message is a bit redundant with the error that will be
				 * reported subsequently, but in some cases the error report
				 * might not make it to the log (eg, if it's caught by an
				 * exception handler), and we want to ensure all long-wait
1055 1056 1057 1058
				 * events get logged.
				 */
				ereport(LOG,
						(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
B
Bruce Momjian 已提交
1059
							  MyProcPid, modename, buf.data, msecs, usecs)));
1060
			}
1061 1062 1063 1064

			if (myWaitStatus == STATUS_WAITING)
				ereport(LOG,
						(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
B
Bruce Momjian 已提交
1065
							  MyProcPid, modename, buf.data, msecs, usecs)));
1066 1067
			else if (myWaitStatus == STATUS_OK)
				ereport(LOG,
B
Bruce Momjian 已提交
1068 1069
					(errmsg("process %d acquired %s on %s after %ld.%03d ms",
							MyProcPid, modename, buf.data, msecs, usecs)));
1070 1071 1072
			else
			{
				Assert(myWaitStatus == STATUS_ERROR);
B
Bruce Momjian 已提交
1073

1074 1075
				/*
				 * Currently, the deadlock checker always kicks its own
B
Bruce Momjian 已提交
1076 1077 1078 1079 1080
				 * process, which means that we'll only see STATUS_ERROR when
				 * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
				 * print redundant messages.  But for completeness and
				 * future-proofing, print a message if it looks like someone
				 * else kicked us off the lock.
1081 1082 1083 1084
				 */
				if (deadlock_state != DS_HARD_DEADLOCK)
					ereport(LOG,
							(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
B
Bruce Momjian 已提交
1085
							  MyProcPid, modename, buf.data, msecs, usecs)));
1086 1087 1088
			}

			/*
B
Bruce Momjian 已提交
1089 1090
			 * At this point we might still need to wait for the lock. Reset
			 * state so we don't print the above messages again.
1091 1092 1093 1094
			 */
			deadlock_state = DS_NO_DEADLOCK;

			pfree(buf.data);
1095
		}
1096
	} while (myWaitStatus == STATUS_WAITING);
1097

1098
	/*
1099
	 * Disable the timer, if it's still running
B
Bruce Momjian 已提交
1100
	 */
1101
	if (!disable_sig_alarm(false))
1102
		elog(FATAL, "could not disable timer for process wakeup");
B
Bruce Momjian 已提交
1103

1104
	/*
B
Bruce Momjian 已提交
1105 1106 1107
	 * Re-acquire the lock table's partition lock.  We have to do this to hold
	 * off cancel/die interrupts before we can mess with lockAwaited (else we
	 * might have a missed or duplicated locallock update).
1108
	 */
1109
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1110 1111 1112

	/*
	 * We no longer want LockWaitCancel to do anything.
1113
	 */
1114
	lockAwaited = NULL;
1115

1116
	/*
1117
	 * If we got the lock, be sure to remember it in the locallock table.
1118
	 */
1119
	if (MyProc->waitStatus == STATUS_OK)
1120
		GrantAwaitedLock();
1121

1122 1123 1124 1125
	/*
	 * We don't have to do anything else, because the awaker did all the
	 * necessary update of the lock table and MyProc.
	 */
1126
	return MyProc->waitStatus;
1127 1128 1129 1130 1131 1132
}


/*
 * ProcWakeup -- wake up a process by releasing its private semaphore.
 *
1133
 *	 Also remove the process from the wait queue and set its links invalid.
1134
 *	 RETURN: the next process in the wait queue.
1135
 *
1136 1137
 * The appropriate lock partition lock must be held by caller.
 *
1138 1139 1140
 * XXX: presently, this code is only used for the "success" case, and only
 * works correctly for that case.  To clean up in failure case, would need
 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1141
 * Hence, in practice the waitStatus parameter must be STATUS_OK.
1142
 */
J
Jan Wieck 已提交
1143
PGPROC *
1144
ProcWakeup(PGPROC *proc, int waitStatus)
1145
{
J
Jan Wieck 已提交
1146
	PGPROC	   *retProc;
1147

1148
	/* Proc should be sleeping ... */
1149 1150
	if (proc->links.prev == NULL ||
		proc->links.next == NULL)
1151
		return NULL;
1152
	Assert(proc->waitStatus == STATUS_WAITING);
1153

1154
	/* Save next process before we zap the list link */
1155
	retProc = (PGPROC *) proc->links.next;
1156

1157
	/* Remove process from wait queue */
1158
	SHMQueueDelete(&(proc->links));
1159
	(proc->waitLock->waitProcs.size)--;
1160

1161 1162
	/* Clean up process' state and pass it the ok/fail signal */
	proc->waitLock = NULL;
1163
	proc->waitProcLock = NULL;
1164
	proc->waitStatus = waitStatus;
1165

1166
	/* And awaken it */
1167
	PGSemaphoreUnlock(&proc->sem);
1168 1169

	return retProc;
1170 1171 1172 1173
}

/*
 * ProcLockWakeup -- routine for waking up processes when a lock is
1174 1175
 *		released (or a prior waiter is aborted).  Scan all waiters
 *		for lock, waken any that are no longer blocked.
1176 1177
 *
 * The appropriate lock partition lock must be held by caller.
1178
 */
1179
void
1180
ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1181
{
1182 1183
	PROC_QUEUE *waitQueue = &(lock->waitProcs);
	int			queue_size = waitQueue->size;
J
Jan Wieck 已提交
1184
	PGPROC	   *proc;
1185
	LOCKMASK	aheadRequests = 0;
M
 
Marc G. Fournier 已提交
1186

1187
	Assert(queue_size >= 0);
1188

1189 1190
	if (queue_size == 0)
		return;
1191

1192
	proc = (PGPROC *) waitQueue->links.next;
1193

1194 1195
	while (queue_size-- > 0)
	{
B
Bruce Momjian 已提交
1196
		LOCKMODE	lockmode = proc->waitLockMode;
M
 
Marc G. Fournier 已提交
1197 1198

		/*
B
Bruce Momjian 已提交
1199 1200
		 * Waken if (a) doesn't conflict with requests of earlier waiters, and
		 * (b) doesn't conflict with already-held locks.
M
 
Marc G. Fournier 已提交
1201
		 */
B
Bruce Momjian 已提交
1202
		if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1203 1204 1205
			LockCheckConflicts(lockMethodTable,
							   lockmode,
							   lock,
1206
							   proc->waitProcLock,
1207
							   proc) == STATUS_OK)
M
 
Marc G. Fournier 已提交
1208
		{
1209
			/* OK to waken */
1210
			GrantLock(lock, proc->waitProcLock, lockmode);
1211
			proc = ProcWakeup(proc, STATUS_OK);
B
Bruce Momjian 已提交
1212

1213
			/*
B
Bruce Momjian 已提交
1214 1215 1216
			 * ProcWakeup removes proc from the lock's waiting process queue
			 * and returns the next proc in chain; don't use proc's next-link,
			 * because it's been cleared.
1217
			 */
M
 
Marc G. Fournier 已提交
1218
		}
1219
		else
1220
		{
B
Bruce Momjian 已提交
1221
			/*
B
Bruce Momjian 已提交
1222
			 * Cannot wake this guy. Remember his request for later checks.
B
Bruce Momjian 已提交
1223
			 */
1224
			aheadRequests |= LOCKBIT_ON(lockmode);
1225
			proc = (PGPROC *) proc->links.next;
1226
		}
M
 
Marc G. Fournier 已提交
1227
	}
1228 1229

	Assert(waitQueue->size >= 0);
1230 1231
}

1232 1233 1234
/*
 * CheckDeadLock
 *
 * We only get to this routine if we got SIGALRM after DeadlockTimeout
 * while waiting for a lock to be released by some other process.  Look
 * to see if there's a deadlock; if not, just return and continue waiting.
 * (But signal ProcSleep to log a message, if log_lock_waits is true.)
 * If we have a real deadlock, remove ourselves from the lock's wait queue
 * and signal an error to ProcSleep.
 *
 * NB: this is run inside a signal handler, so be very wary about what is done
 * here or in called routines.
 */
static void
CheckDeadLock(void)
{
	int			i;

	/*
	 * Acquire exclusive lock on the entire shared lock data structures. Must
	 * grab LWLocks in partition-number order to avoid LWLock deadlock.
	 *
	 * Note that the deadlock check interrupt had better not be enabled
	 * anywhere that this process itself holds lock partition locks, else this
	 * will wait forever.  Also note that LWLockAcquire creates a critical
	 * section, so that this routine cannot be interrupted by cancel/die
	 * interrupts.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE);

	/*
	 * Check to see if we've been awoken by anyone in the interim.
	 *
	 * If we have, we can return and resume our transaction -- happy day.
	 * Before we are awoken the process releasing the lock grants it to us so
	 * we know that we don't have to wait anymore.
	 *
	 * We check by looking to see if we've been unlinked from the wait queue.
	 * This is quicker than checking our semaphore's state, since no kernel
	 * call is needed, and it is safe because we hold the lock partition lock.
	 */
	if (MyProc->links.prev == NULL ||
		MyProc->links.next == NULL)
		goto check_done;

#ifdef LOCK_DEBUG
	if (Debug_deadlocks)
		DumpAllLocks();
#endif

	/* Run the deadlock check, and set deadlock_state for use by ProcSleep */
	deadlock_state = DeadLockCheck(MyProc);

	if (deadlock_state == DS_HARD_DEADLOCK)
	{
		/*
		 * Oops.  We have a deadlock.
		 *
		 * Get this process out of wait state. (Note: we could do this more
		 * efficiently by relying on lockAwaited, but use this coding to
		 * preserve the flexibility to kill some other transaction than the
		 * one detecting the deadlock.)
		 *
		 * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
		 * ProcSleep will report an error after we return from the signal
		 * handler.
		 */
		Assert(MyProc->waitLock != NULL);
		RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));

		/*
		 * Unlock my semaphore so that the interrupted ProcSleep() call can
		 * finish.
		 */
		PGSemaphoreUnlock(&MyProc->sem);

		/*
		 * We're done here.  Transaction abort caused by the error that
		 * ProcSleep will raise will cause any other locks we hold to be
		 * released, thus allowing other processes to wake up; we don't need
		 * to do that here.  NOTE: an exception is that releasing locks we
		 * hold doesn't consider the possibility of waiters that were blocked
		 * behind us on the lock we just failed to get, and might now be
		 * wakable because we're not in front of them anymore.  However,
		 * RemoveFromWaitQueue took care of waking up any such processes.
		 */
	}
	else if (log_lock_waits || deadlock_state == DS_BLOCKED_BY_AUTOVACUUM)
	{
		/*
		 * Unlock my semaphore so that the interrupted ProcSleep() call can
		 * print the log message (we daren't do it here because we are inside
		 * a signal handler).  It will then sleep again until someone releases
		 * the lock.
		 *
		 * If blocked by autovacuum, this wakeup will enable ProcSleep to send
		 * the cancelling signal to the autovacuum worker.
		 */
		PGSemaphoreUnlock(&MyProc->sem);
	}

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs. (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
check_done:
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(FirstLockMgrLock + i);
}


1347 1348 1349 1350 1351 1352
/*
 * ProcWaitForSignal - wait for a signal from another backend.
 *
 * This can share the semaphore normally used for waiting for locks,
 * since a backend could never be waiting for a lock and a signal at
 * the same time.  As with locks, it's OK if the signal arrives just
 * before we actually reach the waiting state.	Also as with locks,
 * it's necessary that the caller be robust against bogus wakeups:
 * always check that the desired state has occurred, and wait again
 * if not.	This copes with possible "leftover" wakeups.
 */
void
ProcWaitForSignal(void)
{
	/* interruptOK = true: cancel/die interrupts may be serviced while waiting */
	PGSemaphoreLock(&MyProc->sem, true);
}

/*
1365
 * ProcSendSignal - send a signal to a backend identified by PID
1366 1367
 */
void
1368
ProcSendSignal(int pid)
1369
{
1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380
	PGPROC	   *proc = NULL;

	if (RecoveryInProgress())
	{
		/* use volatile pointer to prevent code rearrangement */
		volatile PROC_HDR *procglobal = ProcGlobal;

		SpinLockAcquire(ProcStructLock);

		/*
		 * Check to see whether it is the Startup process we wish to signal.
B
Bruce Momjian 已提交
1381 1382
		 * This call is made by the buffer manager when it wishes to wake up a
		 * process that has been waiting for a pin in so it can obtain a
1383
		 * cleanup lock using LockBufferForCleanup(). Startup is not a normal
B
Bruce Momjian 已提交
1384 1385
		 * backend, so BackendPidGetProc() will not return any pid at all. So
		 * we remember the information for this special case.
1386 1387 1388 1389 1390 1391 1392 1393 1394
		 */
		if (pid == procglobal->startupProcPid)
			proc = procglobal->startupProc;

		SpinLockRelease(ProcStructLock);
	}

	if (proc == NULL)
		proc = BackendPidGetProc(pid);
1395 1396

	if (proc != NULL)
1397
		PGSemaphoreUnlock(&proc->sem);
1398 1399 1400
}


1401 1402 1403 1404 1405 1406 1407 1408 1409
/*****************************************************************************
 * SIGALRM interrupt support
 *
 * Maybe these should be in pqsignal.c?
 *****************************************************************************/

/*
 * Enable the SIGALRM interrupt to fire after the specified delay
 *
 * Delay is given in milliseconds.	Caller should be sure a SIGALRM
 * signal handler is installed before this is called.
 *
 * This code properly handles nesting of deadlock timeout alarms within
 * statement timeout alarms.
 *
 * Returns TRUE if okay, FALSE on failure.
 */
bool
enable_sig_alarm(int delayms, bool is_statement_timeout)
{
	TimestampTz fin_time;
	struct itimerval timeval;

	if (is_statement_timeout)
	{
		/*
		 * Begin statement-level timeout
		 *
		 * Note that we compute statement_fin_time with reference to the
		 * statement_timestamp, but apply the specified delay without any
		 * correction; that is, we ignore whatever time has elapsed since
		 * statement_timestamp was set.  In the normal case only a small
		 * interval will have elapsed and so this doesn't matter, but there
		 * are corner cases (involving multi-statement query strings with
		 * embedded COMMIT or ROLLBACK) where we might re-initialize the
		 * statement timeout long after initial receipt of the message. In
		 * such cases the enforcement of the statement timeout will be a bit
		 * inconsistent.  This annoyance is judged not worth the cost of
		 * performing an additional gettimeofday() here.
		 */
		/* A statement timeout must not be started while a deadlock check is pending */
		Assert(!deadlock_timeout_active);
		fin_time = GetCurrentStatementStartTimestamp();
		fin_time = TimestampTzPlusMilliseconds(fin_time, delayms);
		statement_fin_time = fin_time;
		cancel_from_timeout = false;
		statement_timeout_active = true;
	}
	else if (statement_timeout_active)
	{
		/*
		 * Begin deadlock timeout with statement-level timeout active
		 *
		 * Here, we want to interrupt at the closer of the two timeout times.
		 * If fin_time >= statement_fin_time then we need not touch the
		 * existing timer setting; else set up to interrupt at the deadlock
		 * timeout time.
		 *
		 * NOTE: in this case it is possible that this routine will be
		 * interrupted by the previously-set timer alarm.  This is okay
		 * because the signal handler will do only what it should do according
		 * to the state variables.	The deadlock checker may get run earlier
		 * than normal, but that does no harm.
		 */
		timeout_start_time = GetCurrentTimestamp();
		fin_time = TimestampTzPlusMilliseconds(timeout_start_time, delayms);
		deadlock_timeout_active = true;
		/* Statement timeout fires first: leave the existing timer alone. */
		if (fin_time >= statement_fin_time)
			return true;
	}
	else
	{
		/* Begin deadlock timeout with no statement-level timeout */
		deadlock_timeout_active = true;
		/* GetCurrentTimestamp can be expensive, so only do it if we must */
		if (log_lock_waits)
			timeout_start_time = GetCurrentTimestamp();
	}

	/* If we reach here, okay to set the timer interrupt */
	MemSet(&timeval, 0, sizeof(struct itimerval));
	timeval.it_value.tv_sec = delayms / 1000;
	timeval.it_value.tv_usec = (delayms % 1000) * 1000;
	if (setitimer(ITIMER_REAL, &timeval, NULL))
		return false;
	return true;
}

/*
 * Cancel the SIGALRM timer, either for a deadlock timeout or a statement
 * timeout.  If a deadlock timeout is canceled, any active statement timeout
 * remains in force (the interrupt is rescheduled for it).
 *
 * Returns TRUE if okay, FALSE on failure.
 */
bool
disable_sig_alarm(bool is_statement_timeout)
{
	/*
	 * If either timeout is armed, stop the interval timer first.  This keeps
	 * the signal handler from running while we are mutating the timeout
	 * flags; CheckStatementTimeout re-arms the timer below if needed.
	 */
	if (statement_timeout_active || deadlock_timeout_active)
	{
		struct itimerval zero_timer;

		MemSet(&zero_timer, 0, sizeof(struct itimerval));
		if (setitimer(ITIMER_REAL, &zero_timer, NULL))
		{
			/* Couldn't stop the timer: clear all state and report failure. */
			statement_timeout_active = false;
			cancel_from_timeout = false;
			deadlock_timeout_active = false;
			return false;
		}
	}

	/* Always cancel deadlock timeout, in case this is error cleanup. */
	deadlock_timeout_active = false;

	if (is_statement_timeout)
	{
		/* Cancel the statement timeout outright. */
		statement_timeout_active = false;
		cancel_from_timeout = false;
		return true;
	}

	/* Deadlock timeout canceled: reschedule any pending statement timeout. */
	if (statement_timeout_active)
		return CheckStatementTimeout();

	return true;
}

1536

1537
/*
 * Check for statement timeout.  If the timeout time has come,
 * trigger a query-cancel interrupt; if not, reschedule the SIGALRM
 * interrupt to occur at the right time.
 *
 * Returns true if okay, false if failed to set the interrupt.
 */
static bool
CheckStatementTimeout(void)
{
	TimestampTz now;

	if (!statement_timeout_active)
		return true;			/* do nothing if not active */

	now = GetCurrentTimestamp();

	if (now >= statement_fin_time)
	{
		/* Time to die */
		statement_timeout_active = false;
		/* Flag that the coming SIGINT originates from the timeout machinery */
		cancel_from_timeout = true;
#ifdef HAVE_SETSID
		/* try to signal whole process group */
		kill(-MyProcPid, SIGINT);
#endif
		/* Deliver the query-cancel interrupt to ourselves */
		kill(MyProcPid, SIGINT);
	}
	else
	{
		/* Not time yet, so (re)schedule the interrupt */
		long		secs;
		int			usecs;
		struct itimerval timeval;

		TimestampDifference(now, statement_fin_time,
							&secs, &usecs);

		/*
		 * It's possible that the difference is less than a microsecond;
		 * ensure we don't cancel, rather than set, the interrupt.
		 */
		if (secs == 0 && usecs == 0)
			usecs = 1;
		MemSet(&timeval, 0, sizeof(struct itimerval));
		timeval.it_value.tv_sec = secs;
		timeval.it_value.tv_usec = usecs;
		if (setitimer(ITIMER_REAL, &timeval, NULL))
			return false;
	}

	return true;
}
1590 1591 1592


/*
1593
 * Signal handler for SIGALRM for normal user backends
1594 1595 1596 1597 1598
 *
 * Process deadlock check and/or statement timeout check, as needed.
 * To avoid various edge cases, we must be careful to do nothing
 * when there is nothing to be done.  We also need to be able to
 * reschedule the timer interrupt if called before end of statement.
1599 1600 1601 1602
 */
void
handle_sig_alarm(SIGNAL_ARGS)
{
1603 1604 1605
	int			save_errno = errno;

	if (deadlock_timeout_active)
1606
	{
1607
		deadlock_timeout_active = false;
1608 1609
		CheckDeadLock();
	}
1610 1611 1612 1613 1614

	if (statement_timeout_active)
		(void) CheckStatementTimeout();

	errno = save_errno;
1615
}
1616 1617 1618 1619 1620 1621 1622

/*
 * Arm the SIGALRM timer for the Startup process during Hot Standby.
 *
 * (Despite the historical comment, this is the timer-setup routine, not the
 * signal handler itself; see handle_standby_sig_alarm for the handler.)
 *
 * To avoid various edge cases, we must be careful to do nothing
 * when there is nothing to be done.  We also need to be able to
 * reschedule the timer interrupt if called before end of statement.
 *
 * We set either deadlock_timeout_active or statement_timeout_active
 * or both. Interrupts are enabled if standby_timeout_active.
 *
 * fin_time is the hard deadline (MaxStandbyDelay); the first wakeup may be
 * earlier, at now + DeadlockTimeout, so deadlock checks can be requested.
 * Returns true on success, false if setitimer() fails.
 */
bool
enable_standby_sig_alarm(TimestampTz now, TimestampTz fin_time, bool deadlock_only)
{
	TimestampTz deadlock_time = TimestampTzPlusMilliseconds(now, DeadlockTimeout);

	if (deadlock_only)
	{
		/*
		 * Wake up at DeadlockTimeout only, then wait forever
		 */
		statement_fin_time = deadlock_time;
		deadlock_timeout_active = true;
		statement_timeout_active = false;
	}
	else if (fin_time > deadlock_time)
	{
		/*
		 * Wake up at DeadlockTimeout, then again at MaxStandbyDelay
		 * (the second deadline is stashed in statement_fin_time2 and
		 * promoted by CheckStandbyTimeout after the first wakeup).
		 */
		statement_fin_time = deadlock_time;
		statement_fin_time2 = fin_time;
		deadlock_timeout_active = true;
		statement_timeout_active = true;
	}
	else
	{
		/*
		 * Wake only at MaxStandbyDelay because its fairly soon
		 */
		statement_fin_time = fin_time;
		deadlock_timeout_active = false;
		statement_timeout_active = true;
	}

	if (deadlock_timeout_active || statement_timeout_active)
	{
		long		secs;
		int			usecs;
		struct itimerval timeval;

		/* Interrupt at the first deadline chosen above */
		TimestampDifference(now, statement_fin_time,
							&secs, &usecs);
		/* Avoid a zero interval, which would cancel rather than set the timer */
		if (secs == 0 && usecs == 0)
			usecs = 1;
		MemSet(&timeval, 0, sizeof(struct itimerval));
		timeval.it_value.tv_sec = secs;
		timeval.it_value.tv_usec = usecs;
		if (setitimer(ITIMER_REAL, &timeval, NULL))
			return false;
		standby_timeout_active = true;
	}

	return true;
}

/*
 * Shut off the standby SIGALRM timer, if it is armed.
 *
 * The interval timer is cleared before the bookkeeping flag so the signal
 * handler cannot run while state is being mutated; CheckStandbyTimeout
 * re-arms the timer itself when needed.  Returns false only if setitimer()
 * fails (the flag is still cleared in that case).
 */
bool
disable_standby_sig_alarm(void)
{
	bool		ok = true;

	if (standby_timeout_active)
	{
		struct itimerval zero_timer;

		MemSet(&zero_timer, 0, sizeof(struct itimerval));
		if (setitimer(ITIMER_REAL, &zero_timer, NULL))
			ok = false;
	}

	standby_timeout_active = false;

	return ok;
}

/*
 * CheckStandbyTimeout() runs unconditionally in the Startup process
 * SIGALRM handler. Timers will only be set when InHotStandby.
 * We simply ignore any signals unless the timer has been set.
 *
 * Returns true on success, false if setitimer() fails while rescheduling.
 */
static bool
CheckStandbyTimeout(void)
{
	TimestampTz now;
	bool reschedule = false;

	/* Assume the timer is spent; re-armed below if another wait is needed. */
	standby_timeout_active = false;

	now = GetCurrentTimestamp();

	/*
	 * Reschedule the timer if its not time to wake yet, or if we
	 * have both timers set and the first one has just been reached.
	 */
	if (now >= statement_fin_time)
	{
		if (deadlock_timeout_active)
		{
			/*
			 * We're still waiting when we reach DeadlockTimeout, so send out a request
			 * to have other backends check themselves for deadlock. Then continue
			 * waiting until MaxStandbyDelay.
			 */
			SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
			deadlock_timeout_active = false;

			/*
			 * Begin second waiting period to MaxStandbyDelay if required.
			 */
			if (statement_timeout_active)
			{
				reschedule = true;
				/* Promote the second deadline set by enable_standby_sig_alarm */
				statement_fin_time = statement_fin_time2;
			}
		}
		else
		{
			/*
			 * We've now reached MaxStandbyDelay, so ask all conflicts to leave, cos
			 * its time for us to press ahead with applying changes in recovery.
			 */
			SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
		}
	}
	else
		reschedule = true;

	if (reschedule)
	{
		long		secs;
		int			usecs;
		struct itimerval timeval;

		TimestampDifference(now, statement_fin_time,
							&secs, &usecs);
		/* Avoid a zero interval, which would cancel rather than set the timer */
		if (secs == 0 && usecs == 0)
			usecs = 1;
		MemSet(&timeval, 0, sizeof(struct itimerval));
		timeval.it_value.tv_sec = secs;
		timeval.it_value.tv_usec = usecs;
		if (setitimer(ITIMER_REAL, &timeval, NULL))
			return false;
		standby_timeout_active = true;
	}

	return true;
}

void
handle_standby_sig_alarm(SIGNAL_ARGS)
{
B
Bruce Momjian 已提交
1783
	int			save_errno = errno;
1784 1785 1786 1787 1788 1789

	if (standby_timeout_active)
		(void) CheckStandbyTimeout();

	errno = save_errno;
}