/*-------------------------------------------------------------------------
 *
 * unix_latch.c
 *	  Routines for inter-process latches
 *
 * The Unix implementation uses the so-called self-pipe trick to overcome
 * the race condition involved with select() and setting a global flag
 * in the signal handler. When a latch is set and the current process
 * is waiting for it, the signal handler wakes up the select() in
 * WaitLatch by writing a byte to a pipe. A signal by itself doesn't
 * interrupt select() on all platforms, and even on platforms where it
 * does, a signal that arrives just before the select() call does not
 * prevent the select() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes select() to return
 * immediately even if the signal arrives before select() begins.
 *
 * (Actually, we prefer poll() over select() where available, but the
 * same comments apply to it.)
 *
 * When SetLatch is called from the same process that owns the latch,
 * SetLatch writes the byte directly to the pipe. If it's owned by another
 * process, SIGUSR1 is sent and the signal handler in the waiting process
 * writes the byte to the pipe on behalf of the signaling process.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/port/unix_latch.c
 *
 *-------------------------------------------------------------------------
 */
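
/*
 * For illustration only, here is a minimal sketch of the self-pipe trick in
 * isolation (names hypothetical; non-blocking setup and error handling
 * omitted).  The signal handler writes to the pipe, and the wait primitive
 * watches the read end, so a signal arriving at any moment forces a prompt
 * return:
 *
 *		static int pipefd[2];
 *
 *		static void
 *		handler(int signo)
 *		{
 *			(void) write(pipefd[1], "x", 1);
 *		}
 *
 *		...
 *		struct pollfd pfd = {pipefd[0], POLLIN, 0};
 *		(void) poll(&pfd, 1, timeout_ms);
 */
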
#include "postgres.h"

#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#include "miscadmin.h"
#include "portability/instr_time.h"
#include "postmaster/postmaster.h"
#include "storage/barrier.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/shmem.h"

/*
 * Select the fd readiness primitive to use. Normally the "most modern"
 * primitive supported by the OS will be used, but for testing it can be
 * useful to manually specify the used primitive.  If desired, just add a
 * define somewhere before this block.
 */
#if defined(LATCH_USE_POLL) || defined(LATCH_USE_SELECT)
/* don't overwrite manual choice */
#elif defined(HAVE_POLL)
#define LATCH_USE_POLL
#elif HAVE_SYS_SELECT_H
#define LATCH_USE_SELECT
#else
#error "no latch implementation available"
#endif
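
/*
 * For instance, to exercise the select() path on a platform that also has
 * poll(), it should suffice (per the comment above) to make the choice
 * before this block:
 *
 *		#define LATCH_USE_SELECT
 *
 * or equivalently pass -DLATCH_USE_SELECT via CPPFLAGS when building.
 */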

/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;

/* Read and write ends of the self-pipe */
static int	selfpipe_readfd = -1;
static int	selfpipe_writefd = -1;

/* Private function prototypes */
static void sendSelfPipeByte(void);
static void drainSelfPipe(void);


/*
 * Initialize the process-local latch infrastructure.
 *
 * This must be called once during startup of any process that can wait on
 * latches, before it issues any InitLatch() or OwnLatch() calls.
 */
void
InitializeLatchSupport(void)
{
	int			pipefd[2];

	Assert(selfpipe_readfd == -1);

	/*
	 * Set up the self-pipe that allows a signal handler to wake up the
	 * select() in WaitLatch. Make the write-end non-blocking, so that
	 * SetLatch won't block if the event has already been set many times
	 * filling the kernel buffer. Make the read-end non-blocking too, so that
	 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
	 */
	if (pipe(pipefd) < 0)
		elog(FATAL, "pipe() failed: %m");
	if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0)
		elog(FATAL, "fcntl() failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) < 0)
		elog(FATAL, "fcntl() failed on write-end of self-pipe: %m");

	selfpipe_readfd = pipefd[0];
	selfpipe_writefd = pipefd[1];
}
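
/*
 * A sketch of the expected call order during process startup ("MyLatch"
 * here stands for whichever Latch this process will wait on):
 *
 *		InitializeLatchSupport();		once per process, before anything else
 *		InitLatch(MyLatch);				for a backend-local latch
 *		... or OwnLatch(sharedLatch)	for a latch in shared memory
 */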

/*
 * Initialize a backend-local latch.
 */
void
InitLatch(volatile Latch *latch)
{
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(selfpipe_readfd >= 0);

	latch->is_set = false;
	latch->owner_pid = MyProcPid;
	latch->is_shared = false;
}

/*
 * Initialize a shared latch that can be set from other processes. The latch
 * is initially owned by no-one; use OwnLatch to associate it with the
 * current process.
 *
 * InitSharedLatch needs to be called in postmaster before forking child
 * processes, usually right after allocating the shared memory block
 * containing the latch with ShmemInitStruct. (The Unix implementation
 * doesn't actually require that, but the Windows one does.) Because of
 * this restriction, we have no concurrency issues to worry about here.
 */
void
InitSharedLatch(volatile Latch *latch)
{
	latch->is_set = false;
	latch->owner_pid = 0;
	latch->is_shared = true;
}

/*
 * Associate a shared latch with the current process, allowing it to
 * wait on the latch.
 *
 * Although there is a sanity check for latch-already-owned, we don't do
 * any sort of locking here, meaning that we could fail to detect the error
 * if two processes try to own the same latch at about the same time.  If
 * there is any risk of that, caller must provide an interlock to prevent it.
 *
 * In any process that calls OwnLatch(), make sure that
 * latch_sigusr1_handler() is called from the SIGUSR1 signal handler,
 * as shared latches use SIGUSR1 for inter-process communication.
 */
void
OwnLatch(volatile Latch *latch)
{
	/* Assert InitializeLatchSupport has been called in this process */
	Assert(selfpipe_readfd >= 0);

	Assert(latch->is_shared);

	/* sanity check */
	if (latch->owner_pid != 0)
		elog(ERROR, "latch already owned");

	latch->owner_pid = MyProcPid;
}
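
/*
 * A sketch of the shared-latch lifecycle under the rules above (structure
 * and handler names are hypothetical):
 *
 *		in the postmaster, before child processes are forked:
 *			MySharedArea *area = ShmemInitStruct("my area",
 *												 sizeof(MySharedArea), &found);
 *			InitSharedLatch(&area->latch);
 *
 *		in each child process that will wait on it:
 *			InitializeLatchSupport();
 *			pqsignal(SIGUSR1, my_sigusr1_handler);	the handler must call
 *													latch_sigusr1_handler()
 *			OwnLatch(&area->latch);
 */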

/*
 * Disown a shared latch currently owned by the current process.
 */
void
DisownLatch(volatile Latch *latch)
{
	Assert(latch->is_shared);
	Assert(latch->owner_pid == MyProcPid);

	latch->owner_pid = 0;
}

/*
 * Wait for a given latch to be set, or for postmaster death, or until timeout
 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
 * function returns immediately.
 *
 * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
 * is given.  Although it is declared as "long", we don't actually support
 * timeouts longer than INT_MAX milliseconds.  Note that some extra overhead
 * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
 *
 * The latch must be owned by the current process, ie. it must be a
 * backend-local latch initialized with InitLatch, or a shared latch
 * associated with the current process by calling OwnLatch.
 *
 * Returns bit mask indicating which condition(s) caused the wake-up. Note
 * that if multiple wake-up conditions are true, there is no guarantee that
 * we return all of them in one call, but we will return at least one.
 */
int
WaitLatch(volatile Latch *latch, int wakeEvents, long timeout)
{
	return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout);
}
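
/*
 * The canonical usage pattern looks like this (a sketch; "work to do" is
 * whatever condition the latch signals):
 *
 *		for (;;)
 *		{
 *			ResetLatch(MyLatch);
 *			if (work to do)
 *				DoStuff();
 *			WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT, 1000L);
 *		}
 *
 * Resetting the latch before checking for work, and waiting at the bottom
 * of the loop rather than the top, ensures that a SetLatch occurring
 * between the check and the wait is not lost (see the comments in SetLatch
 * below).
 */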

/*
 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
 * conditions.
 *
 * When waiting on a socket, EOF and error conditions are reported by
 * returning the socket as readable/writable or both, depending on
 * WL_SOCKET_READABLE/WL_SOCKET_WRITEABLE being specified.
 */
int
WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
				  long timeout)
{
	int			result = 0;
	int			rc;
	instr_time	start_time,
				cur_time;
	long		cur_timeout;

#if defined(LATCH_USE_POLL)
	struct pollfd pfds[3];
	int			nfds;
#elif defined(LATCH_USE_SELECT)
	struct timeval tv,
			   *tvp;
	fd_set		input_mask;
	fd_set		output_mask;
	int			hifd;
#endif

	Assert(wakeEvents != 0);	/* must have at least one wake event */

	/* waiting for socket readiness without a socket indicates a bug */
	if (sock == PGINVALID_SOCKET &&
		(wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) != 0)
		elog(ERROR, "cannot wait on socket event without a socket");

	if ((wakeEvents & WL_LATCH_SET) && latch->owner_pid != MyProcPid)
		elog(ERROR, "cannot wait on a latch owned by another process");

	/*
	 * Initialize timeout if requested.  We must record the current time so
	 * that we can determine the remaining timeout if the poll() or select()
	 * is interrupted.  (On some platforms, select() will update the contents
	 * of "tv" for us, but unfortunately we can't rely on that.)
	 */
	if (wakeEvents & WL_TIMEOUT)
	{
		INSTR_TIME_SET_CURRENT(start_time);
		Assert(timeout >= 0 && timeout <= INT_MAX);
		cur_timeout = timeout;

#ifdef LATCH_USE_SELECT
		tv.tv_sec = cur_timeout / 1000L;
		tv.tv_usec = (cur_timeout % 1000L) * 1000L;
		tvp = &tv;
#endif
	}
	else
	{
		cur_timeout = -1;

#ifdef LATCH_USE_SELECT
		tvp = NULL;
#endif
	}

	waiting = true;
	do
	{
		/*
		 * Clear the pipe, then check if the latch is set already. If someone
		 * sets the latch between this and the poll()/select() below, the
		 * setter will write a byte to the pipe (or signal us and the signal
		 * handler will do that), and the poll()/select() will return
		 * immediately.
		 *
		 * Note: we assume that the kernel calls involved in drainSelfPipe()
		 * and SetLatch() will provide adequate synchronization on machines
		 * with weak memory ordering, so that we cannot miss seeing is_set if
		 * the signal byte is already in the pipe when we drain it.
		 */
		drainSelfPipe();

		if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
		{
			result |= WL_LATCH_SET;

			/*
			 * Leave loop immediately, avoid blocking again. We don't attempt
			 * to report any other events that might also be satisfied.
			 */
			break;
		}

		/*
		 * Must wait ... we use the polling interface determined at the top of
		 * this file to do so.
		 */
#if defined(LATCH_USE_POLL)
		nfds = 0;
		if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
		{
			/* socket, if used, is always in pfds[0] */
			pfds[0].fd = sock;
			pfds[0].events = 0;
			if (wakeEvents & WL_SOCKET_READABLE)
				pfds[0].events |= POLLIN;
			if (wakeEvents & WL_SOCKET_WRITEABLE)
				pfds[0].events |= POLLOUT;
			pfds[0].revents = 0;
			nfds++;
		}

		pfds[nfds].fd = selfpipe_readfd;
		pfds[nfds].events = POLLIN;
		pfds[nfds].revents = 0;
		nfds++;

		if (wakeEvents & WL_POSTMASTER_DEATH)
		{
			/* postmaster fd, if used, is always in pfds[nfds - 1] */
			pfds[nfds].fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
			pfds[nfds].events = POLLIN;
			pfds[nfds].revents = 0;
			nfds++;
		}

		/* Sleep */
		rc = poll(pfds, nfds, (int) cur_timeout);

		/* Check return code */
		if (rc < 0)
		{
			/* EINTR is okay, otherwise complain */
			if (errno != EINTR)
			{
				waiting = false;
				ereport(ERROR,
						(errcode_for_socket_access(),
						 errmsg("poll() failed: %m")));
			}
		}
		else if (rc == 0)
		{
			/* timeout exceeded */
			if (wakeEvents & WL_TIMEOUT)
				result |= WL_TIMEOUT;
		}
		else
		{
			/* at least one event occurred, so check revents values */
			if ((wakeEvents & WL_SOCKET_READABLE) &&
				(pfds[0].revents & POLLIN))
			{
				/* data available in socket, or EOF/error condition */
				result |= WL_SOCKET_READABLE;
			}
			if ((wakeEvents & WL_SOCKET_WRITEABLE) &&
				(pfds[0].revents & POLLOUT))
			{
				/* socket is writable */
				result |= WL_SOCKET_WRITEABLE;
			}
			if (pfds[0].revents & (POLLHUP | POLLERR | POLLNVAL))
			{
				/* EOF/error condition */
				if (wakeEvents & WL_SOCKET_READABLE)
					result |= WL_SOCKET_READABLE;
				if (wakeEvents & WL_SOCKET_WRITEABLE)
					result |= WL_SOCKET_WRITEABLE;
			}

			/*
			 * We expect a POLLHUP when the remote end is closed, but because
			 * we don't expect the pipe to become readable or to have any
			 * errors either, treat those cases as postmaster death, too.
			 */
			if ((wakeEvents & WL_POSTMASTER_DEATH) &&
				(pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
			{
				/*
				 * According to the select(2) man page on Linux, select(2) may
				 * spuriously return and report a file descriptor as readable,
				 * when it's not; and presumably so can poll(2).  It's not
				 * clear that the relevant cases would ever apply to the
				 * postmaster pipe, but since the consequences of falsely
				 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
				 * we take the trouble to positively verify EOF with
				 * PostmasterIsAlive().
				 */
				if (!PostmasterIsAlive())
					result |= WL_POSTMASTER_DEATH;
			}
		}
#elif defined(LATCH_USE_SELECT)

		/*
		 * On at least older Linux kernels select(), in violation of POSIX,
		 * doesn't reliably return a socket as writable if closed - but we
		 * rely on that. So far all the known cases of this problem are on
		 * platforms that also provide a poll() implementation without that
		 * bug.  If we find one where that's not the case, we'll need to add a
		 * workaround.
		 */
		FD_ZERO(&input_mask);
		FD_ZERO(&output_mask);

		FD_SET(selfpipe_readfd, &input_mask);
		hifd = selfpipe_readfd;

		if (wakeEvents & WL_POSTMASTER_DEATH)
		{
			FD_SET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask);
			if (postmaster_alive_fds[POSTMASTER_FD_WATCH] > hifd)
				hifd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
		}

		if (wakeEvents & WL_SOCKET_READABLE)
		{
			FD_SET(sock, &input_mask);
			if (sock > hifd)
				hifd = sock;
		}

		if (wakeEvents & WL_SOCKET_WRITEABLE)
		{
			FD_SET(sock, &output_mask);
			if (sock > hifd)
				hifd = sock;
		}

		/* Sleep */
		rc = select(hifd + 1, &input_mask, &output_mask, NULL, tvp);

		/* Check return code */
		if (rc < 0)
		{
			/* EINTR is okay, otherwise complain */
			if (errno != EINTR)
			{
				waiting = false;
				ereport(ERROR,
						(errcode_for_socket_access(),
						 errmsg("select() failed: %m")));
			}
		}
		else if (rc == 0)
		{
			/* timeout exceeded */
			if (wakeEvents & WL_TIMEOUT)
				result |= WL_TIMEOUT;
		}
		else
		{
			/* at least one event occurred, so check masks */
			if ((wakeEvents & WL_SOCKET_READABLE) && FD_ISSET(sock, &input_mask))
			{
				/* data available in socket, or EOF */
				result |= WL_SOCKET_READABLE;
			}
			if ((wakeEvents & WL_SOCKET_WRITEABLE) && FD_ISSET(sock, &output_mask))
			{
				/* socket is writable, or EOF */
				result |= WL_SOCKET_WRITEABLE;
			}
			if ((wakeEvents & WL_POSTMASTER_DEATH) &&
				FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH],
						 &input_mask))
			{
				/*
				 * According to the select(2) man page on Linux, select(2) may
				 * spuriously return and report a file descriptor as readable,
				 * when it's not; and presumably so can poll(2).  It's not
				 * clear that the relevant cases would ever apply to the
				 * postmaster pipe, but since the consequences of falsely
				 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
				 * we take the trouble to positively verify EOF with
				 * PostmasterIsAlive().
				 */
				if (!PostmasterIsAlive())
					result |= WL_POSTMASTER_DEATH;
			}
		}
#endif   /* LATCH_USE_SELECT */

		/* If we're not done, update cur_timeout for next iteration */
		if (result == 0 && (wakeEvents & WL_TIMEOUT))
		{
			INSTR_TIME_SET_CURRENT(cur_time);
			INSTR_TIME_SUBTRACT(cur_time, start_time);
			cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
			if (cur_timeout <= 0)
			{
				/* Timeout has expired, no need to continue looping */
				result |= WL_TIMEOUT;
			}
#ifdef LATCH_USE_SELECT
			else
			{
				tv.tv_sec = cur_timeout / 1000L;
				tv.tv_usec = (cur_timeout % 1000L) * 1000L;
			}
#endif
		}
	} while (result == 0);
	waiting = false;

	return result;
}
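
/*
 * For example, a hypothetical caller waiting for either a latch wake-up or
 * incoming data on a client socket might do the following (the timeout
 * argument is ignored when WL_TIMEOUT is not requested, so 0 is passed):
 *
 *		rc = WaitLatchOrSocket(MyLatch,
 *							   WL_LATCH_SET | WL_SOCKET_READABLE,
 *							   client_sock, 0);
 *		if (rc & WL_SOCKET_READABLE)
 *			... read from client_sock, remembering that EOF and error
 *			conditions are reported as readability ...
 *		if (rc & WL_LATCH_SET)
 *			... handle the latch event, typically after ResetLatch ...
 */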

/*
 * Sets a latch and wakes up anyone waiting on it.
 *
 * This is cheap if the latch is already set, otherwise not so much.
 *
 * NB: when calling this in a signal handler, be sure to save and restore
 * errno around it.  (That's standard practice in most signal handlers, of
 * course, but we used to omit it in handlers that only set a flag.)
 *
 * NB: this function is called from critical sections and signal handlers so
 * throwing an error is not a good idea.
 */
void
SetLatch(volatile Latch *latch)
{
	pid_t		owner_pid;

	/*
	 * The memory barrier has to be placed here to ensure that any flag
	 * variables possibly changed by this process have been flushed to main
	 * memory, before we check/set is_set.
	 */
	pg_memory_barrier();

	/* Quick exit if already set */
	if (latch->is_set)
		return;

	latch->is_set = true;

	/*
	 * See if anyone's waiting for the latch. It can be the current process if
	 * we're in a signal handler. We use the self-pipe to wake up the select()
	 * in that case. If it's another process, send a signal.
	 *
	 * Fetch owner_pid only once, in case the latch is concurrently getting
	 * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
	 * guaranteed to be true! In practice, the effective range of pid_t fits
	 * in a 32 bit integer, and so should be atomic. In the worst case, we
	 * might end up signaling the wrong process. Even then, you're very
	 * unlucky if a process with that bogus pid exists and belongs to
	 * Postgres; and PG database processes should handle excess SIGUSR1
	 * interrupts without a problem anyhow.
	 *
	 * Another sort of race condition that's possible here is for a new
	 * process to own the latch immediately after we look, so we don't signal
	 * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
	 * the standard coding convention of waiting at the bottom of their loops,
	 * not the top, so that they'll correctly process latch-setting events
	 * that happen before they enter the loop.
	 */
	owner_pid = latch->owner_pid;
	if (owner_pid == 0)
		return;
	else if (owner_pid == MyProcPid)
	{
		if (waiting)
			sendSelfPipeByte();
	}
	else
		kill(owner_pid, SIGUSR1);
}
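
/*
 * Per the notes above, a signal handler that calls SetLatch should save and
 * restore errno around it.  A sketch (handler name hypothetical):
 *
 *		static void
 *		my_sigusr1_handler(SIGNAL_ARGS)
 *		{
 *			int			save_errno = errno;
 *
 *			SetLatch(MyLatch);
 *			errno = save_errno;
 *		}
 */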

/*
 * Clear the latch. Calling WaitLatch after this will sleep, unless
 * the latch is set again before the WaitLatch call.
 */
void
ResetLatch(volatile Latch *latch)
{
	/* Only the owner should reset the latch */
	Assert(latch->owner_pid == MyProcPid);

	latch->is_set = false;

	/*
	 * Ensure that the write to is_set gets flushed to main memory before we
	 * examine any flag variables.  Otherwise a concurrent SetLatch might
	 * falsely conclude that it needn't signal us, even though we have missed
	 * seeing some flag updates that SetLatch was supposed to inform us of.
	 */
	pg_memory_barrier();
}

/*
 * SetLatch uses SIGUSR1 to wake up the process waiting on the latch.
 *
 * Wake up WaitLatch, if we're waiting.  (We might not be, since SIGUSR1 is
 * overloaded for multiple purposes; or we might not have reached WaitLatch
 * yet, in which case we don't need to fill the pipe either.)
 *
 * NB: when calling this in a signal handler, be sure to save and restore
 * errno around it.
 */
void
latch_sigusr1_handler(void)
{
	if (waiting)
		sendSelfPipeByte();
}

/* Send one byte to the self-pipe, to wake up WaitLatch */
static void
sendSelfPipeByte(void)
{
	int			rc;
	char		dummy = 0;

retry:
	rc = write(selfpipe_writefd, &dummy, 1);
	if (rc < 0)
	{
		/* If interrupted by signal, just retry */
		if (errno == EINTR)
			goto retry;

		/*
		 * If the pipe is full, we don't need to retry, the data that's there
		 * already is enough to wake up WaitLatch.
		 */
		if (errno == EAGAIN || errno == EWOULDBLOCK)
			return;

		/*
		 * Oops, the write() failed for some other reason. We might be in a
		 * signal handler, so it's not safe to elog(). We have no choice but
		 * to ignore the error silently.
		 */
		return;
	}
}

/*
 * Read all available data from the self-pipe
 *
 * Note: this is only called when waiting = true.  If it fails by throwing
 * elog(ERROR), which does not return, it must reset that flag first (though
 * ideally, this will never happen).
 */
static void
drainSelfPipe(void)
{
	/*
	 * There shouldn't normally be more than one byte in the pipe, or maybe a
	 * few bytes if multiple processes run SetLatch at the same instant.
	 */
	char		buf[16];
	int			rc;

	for (;;)
	{
		rc = read(selfpipe_readfd, buf, sizeof(buf));
		if (rc < 0)
		{
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				break;			/* the pipe is empty */
			else if (errno == EINTR)
				continue;		/* retry */
			else
			{
				waiting = false;
				elog(ERROR, "read() on self-pipe failed: %m");
			}
		}
		else if (rc == 0)
		{
			waiting = false;
693
			elog(ERROR, "unexpected EOF on self-pipe");
694 695 696 697 698 699 700
		}
		else if (rc < sizeof(buf))
		{
			/* we successfully drained the pipe; no need to read() again */
			break;
		}
		/* else buffer wasn't big enough, so read again */
	}
}