/*-------------------------------------------------------------------------
 * nbtsort.c
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Id: nbtsort.c,v 1.46 1999/07/19 07:07:19 momjian Exp $
 *
 * NOTES
 *
 * what we do is:
 * - generate a set of initial one-block runs, distributed round-robin
 *	 between the output tapes.
 * - for each pass,
 *	 - swap input and output tape sets, rewinding both and truncating
 *	   the output tapes.
 *	 - merge the current run in each input tape to the current output
 *	   tape.
 *	   - when each input run has been exhausted, switch to another output
 *		 tape and start processing another run.
 * - when we have fewer runs than tapes, we know we are ready to start
 *	 merging into the btree leaf pages.  (i.e., we do not have to wait
 *	 until we have exactly one tape.)
 * - as we extract tuples from the final runs, we build the pages for
 *	 each level.  when we have only one page on a level, it must be the
 *	 root -- it can be attached to the btree metapage and we are done.
 *
 * conventions:
 * - external interface routines take in and return "void *" for their
 *	 opaque handles.  this is for modularity reasons.
 *
 * this code is moderately slow (~10% slower) compared to the regular
 * btree (insertion) build code on sorted or well-clustered data.  on
 * random data, however, the insertion build code is unusable -- the
 * difference on a 60MB heap is a factor of 15 because the random
 * probes into the btree thrash the buffer pool.
 *
 * this code currently packs the pages to 100% of capacity.  this is
 * not wise, since *any* insertion will cause splitting.  filling to
 * something like the standard 70% steady-state load factor for btrees
 * would probably be better.
 *
 * somebody desperately needs to figure out how to do a better job of
 * balancing the merge passes -- the fan-in on the final merges can be
 * pretty poor, which is bad for performance.
 *-------------------------------------------------------------------------
 */
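
/*
 * a rough worked example (added commentary, not from the original
 * sources; exact numbers depend on how many items fit per tape block):
 * with MAXTAPES = 7, each merge pass takes one run from every input
 * tape and writes a single output run, so R runs shrink to about
 * ceil(R/7) per pass, e.g.
 *
 *	 700 one-block runs -> 100 -> 15 -> 3
 *
 * once a pass leaves at most 7 runs (nruns <= bts_ntapes in _bt_merge),
 * the next pass merges straight into the btree leaf pages instead of
 * back onto a tape.
 */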

#include <fcntl.h>

#include "postgres.h"

#include "access/nbtree.h"


#ifdef BTREE_BUILD_STATS
#define ShowExecutorStats pg_options[TRACE_EXECUTORSTATS]
#endif

static BTItem _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags);
static BTItem _bt_minitem(Page opage, BlockNumber oblkno, int atend);
static void *_bt_pagestate(Relation index, int flags, int level, bool doupper);
static void _bt_uppershutdown(Relation index, BTPageState *state);

/*
 * turn on debugging output.
 *
 * XXX this code just does a numeric printf of the index key, so it's
 * only really useful for integer keys.
 */
/*#define FASTBUILD_DEBUG*/
#define FASTBUILD_SPOOL
#define FASTBUILD_MERGE

#define MAXTAPES		(7)
#define TAPEBLCKSZ		(BLCKSZ << 2)

extern int	NDirectFileRead;
extern int	NDirectFileWrite;

/*
 * this is what we use to shovel BTItems in and out of memory.	it's
 * bigger than a standard block because we are doing a lot of strictly
 * sequential i/o.	this is obviously something of a tradeoff since we
 * are potentially reading a bunch of zeroes off of disk in many
 * cases.
 *
 * BTItems are packed in and MAXALIGN'd.
 *
 * the fd should not be going out to disk, strictly speaking, but it's
 * the only thing like that so i'm not going to worry about wasting a
 * few bytes.
 */
typedef struct
{
	int			bttb_magic;		/* magic number */
	File		bttb_fd;		/* file descriptor */
	int			bttb_top;		/* top of free space within bttb_data */
	short		bttb_ntup;		/* number of tuples in this block */
	short		bttb_eor;		/* End-Of-Run marker */
	char		bttb_data[TAPEBLCKSZ - 2 * sizeof(double)];
} BTTapeBlock;
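
/*
 * a worked size example (added commentary, not from the original
 * sources): with the default 8K BLCKSZ, TAPEBLCKSZ is BLCKSZ << 2 =
 * 32K, so each _bt_taperead/_bt_tapewrite moves four disk blocks at a
 * time and bumps NDirectFileRead/NDirectFileWrite by
 * TAPEBLCKSZ / BLCKSZ = 4.
 */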

/*
 * this structure holds the bookkeeping for a simple balanced multiway
 * merge.  (polyphase merging is hairier than i want to get into right
 * now, and i don't see why i have to care how many "tapes" i use
 * right now.  though if psort was in a condition that i could hack it
 * to do this, you bet i would.)
 */
typedef struct
{
	int			bts_ntapes;
	int			bts_tape;
	BTTapeBlock **bts_itape;	/* input tape blocks */
	BTTapeBlock **bts_otape;	/* output tape blocks */
	bool		isunique;
} BTSpool;

/*-------------------------------------------------------------------------
 * sorting comparison routine - returns {-1,0,1} depending on whether
 * the key in the left BTItem is {<,=,>} the key in the right BTItem.
 *
 * we want to use _bt_isortcmp as a comparison function for qsort(3),
 * but it needs extra arguments, so we "pass them in" as global
 * variables.  ick.  fortunately, they are the same throughout the
 * build, so we need do this only once.  this is why you must call
 * _bt_isortcmpinit before the call to qsort(3).
 *
 * a NULL BTItem is always assumed to be greater than any actual
 * value; our heap routines (see below) assume that the smallest
 * element in the heap is returned.  that way, NULL values from the
 * exhausted tapes can sift down to the bottom of the heap.  in point
 * of fact we just don't replace the elements of exhausted tapes, but
 * what the heck.
 *-------------------------------------------------------------------------
 */
typedef struct
{
	Datum	   *btsk_datum;
	char	   *btsk_nulls;
	BTItem		btsk_item;
} BTSortKey;

static Relation _bt_sortrel;
static int	_bt_nattr;
static BTSpool *_bt_inspool;

static void
_bt_isortcmpinit(Relation index, BTSpool *spool)
{
	_bt_sortrel = index;
	_bt_inspool = spool;
	_bt_nattr = index->rd_att->natts;
}

static int
_bt_isortcmp(BTSortKey *k1, BTSortKey *k2)
{
	Datum	   *k1_datum = k1->btsk_datum;
	Datum	   *k2_datum = k2->btsk_datum;
	char	   *k1_nulls = k1->btsk_nulls;
	char	   *k2_nulls = k2->btsk_nulls;
	bool		equal_isnull = false;
	int			i;

	if (k1->btsk_item == (BTItem) NULL)
	{
		if (k2->btsk_item == (BTItem) NULL)
			return 0;			/* 1 = 2 */
		return 1;				/* 1 > 2 */
	}
	else if (k2->btsk_item == (BTItem) NULL)
		return -1;				/* 1 < 2 */

	for (i = 0; i < _bt_nattr; i++)
	{
		if (k1_nulls[i] != ' ') /* k1 attr is NULL */
		{
			if (k2_nulls[i] != ' ')		/* the same for k2 */
			{
				equal_isnull = true;
				continue;
			}
			return 1;			/* NULL ">" NOT_NULL */
		}
		else if (k2_nulls[i] != ' ')	/* k2 attr is NULL */
			return -1;			/* NOT_NULL "<" NULL */

		if (_bt_invokestrat(_bt_sortrel, i + 1, BTGreaterStrategyNumber,
							k1_datum[i], k2_datum[i]))
			return 1;			/* 1 > 2 */
		else if (_bt_invokestrat(_bt_sortrel, i + 1, BTGreaterStrategyNumber,
								 k2_datum[i], k1_datum[i]))
			return -1;			/* 1 < 2 */
	}

	if (_bt_inspool->isunique && !equal_isnull)
	{
		_bt_spooldestroy((void *) _bt_inspool);
		elog(ERROR, "Cannot create unique index. Table contains non-unique values");
	}
	return 0;					/* 1 = 2 */
}

static void
_bt_setsortkey(Relation index, BTItem bti, BTSortKey *sk)
{
	sk->btsk_item = (BTItem) NULL;
	sk->btsk_datum = (Datum *) NULL;
	sk->btsk_nulls = (char *) NULL;

	if (bti != (BTItem) NULL)
	{
		IndexTuple	it = &(bti->bti_itup);
		TupleDesc	itdesc = index->rd_att;
		Datum	   *dp = (Datum *) palloc(_bt_nattr * sizeof(Datum));
		char	   *np = (char *) palloc(_bt_nattr * sizeof(char));
		bool		isnull;
		int			i;

		for (i = 0; i < _bt_nattr; i++)
		{
			dp[i] = index_getattr(it, i + 1, itdesc, &isnull);
			if (isnull)
				np[i] = 'n';
			else
				np[i] = ' ';
		}
		sk->btsk_item = bti;
		sk->btsk_datum = dp;
		sk->btsk_nulls = np;
	}
}

/*-------------------------------------------------------------------------
 * priority queue methods
 *
 * these were more-or-less lifted from the heap section of the 1984
 * edition of gonnet's book on algorithms and data structures.  they
 * are coded so that the smallest element in the heap is returned (we
 * use them for merging sorted runs).
 *
 * XXX these probably ought to be generic library functions.
 *-------------------------------------------------------------------------
 */
typedef struct
{
	int			btpqe_tape;		/* tape identifier */
	BTSortKey	btpqe_item;		/* pointer to BTItem in tape buffer */
} BTPriQueueElem;

#define MAXELEM MAXTAPES
typedef struct
{
	int			btpq_nelem;
	BTPriQueueElem btpq_queue[MAXELEM];
	Relation	btpq_rel;
} BTPriQueue;

/* be sure to call _bt_isortcmpinit first */
#define GREATER(a, b) \
	(_bt_isortcmp(&((a)->btpqe_item), &((b)->btpqe_item)) > 0)

static void
_bt_pqsift(BTPriQueue *q, int parent)
{
	int			child;
	BTPriQueueElem e;

	for (child = parent * 2 + 1;
		 child < q->btpq_nelem;
		 child = parent * 2 + 1)
	{
		if (child < q->btpq_nelem - 1)
		{
			if (GREATER(&(q->btpq_queue[child]), &(q->btpq_queue[child + 1])))
				++child;
		}
		if (GREATER(&(q->btpq_queue[parent]), &(q->btpq_queue[child])))
		{
			e = q->btpq_queue[child];	/* struct = */
			q->btpq_queue[child] = q->btpq_queue[parent];		/* struct = */
			q->btpq_queue[parent] = e;	/* struct = */
			parent = child;
		}
		else
			parent = child + 1;
	}
}

static int
_bt_pqnext(BTPriQueue *q, BTPriQueueElem *e)
{
	if (q->btpq_nelem < 1)
	{							/* already empty */
		return -1;
	}
	*e = q->btpq_queue[0];		/* struct = */

	if (--q->btpq_nelem < 1)
	{							/* now empty, don't sift */
		return 0;
	}
	q->btpq_queue[0] = q->btpq_queue[q->btpq_nelem];	/* struct = */
	_bt_pqsift(q, 0);
	return 0;
}

static void
_bt_pqadd(BTPriQueue *q, BTPriQueueElem *e)
{
	int			child,
				parent;

	if (q->btpq_nelem >= MAXELEM)
		elog(ERROR, "_bt_pqadd: queue overflow");

	child = q->btpq_nelem++;
	while (child > 0)
	{
		parent = child / 2;
		if (GREATER(e, &(q->btpq_queue[parent])))
			break;
		else
		{
			q->btpq_queue[child] = q->btpq_queue[parent];		/* struct = */
			child = parent;
		}
	}

	q->btpq_queue[child] = *e;	/* struct = */
}

/*-------------------------------------------------------------------------
 * tape methods
 *-------------------------------------------------------------------------
 */

#define BTITEMSZ(btitem) \
	((btitem) ? \
	 (IndexTupleDSize((btitem)->bti_itup) + \
	  (sizeof(BTItemData) - sizeof(IndexTupleData))) : \
	 0)
#define SPCLEFT(tape) \
	(sizeof((tape)->bttb_data) - (tape)->bttb_top)
#define EMPTYTAPE(tape) \
	((tape)->bttb_ntup <= 0)
#define BTTAPEMAGIC		0x19660226

/*
 * reset the tape header for its next use without doing anything to
 * the physical tape file.	(setting bttb_top to 0 makes the block
 * empty.)
 */
static void
_bt_tapereset(BTTapeBlock *tape)
{
	tape->bttb_eor = 0;
	tape->bttb_top = 0;
	tape->bttb_ntup = 0;
}

/*
 * rewind the physical tape file.
 */
static void
_bt_taperewind(BTTapeBlock *tape)
{
	FileSeek(tape->bttb_fd, 0L, SEEK_SET);
}

/*
 * destroy the contents of the physical tape file without destroying
 * the tape data structure or removing the physical tape file.
 *
 * we use the VFD version of ftruncate(2) to do this rather than
 * unlinking and recreating the file.  you still have to wait while
 * the OS frees up all of the file system blocks and stuff, but at
 * least you don't have to delete and reinsert the directory entries.
 */
static void
_bt_tapeclear(BTTapeBlock *tape)
{
	/* blow away the contents of the old file */
	_bt_taperewind(tape);
#ifdef NOT_USED
	FileSync(tape->bttb_fd);
#endif
	FileTruncate(tape->bttb_fd, 0);

	/* reset the buffer */
	_bt_tapereset(tape);
}

/*
 * create a new BTTapeBlock, allocating memory for the data structure
 * as well as opening a physical tape file.
 */
static BTTapeBlock *
_bt_tapecreate(void)
{
	BTTapeBlock *tape = (BTTapeBlock *) palloc(sizeof(BTTapeBlock));

	if (tape == (BTTapeBlock *) NULL)
		elog(ERROR, "_bt_tapecreate: out of memory");

	tape->bttb_magic = BTTAPEMAGIC;

	tape->bttb_fd = OpenTemporaryFile();
	Assert(tape->bttb_fd >= 0);

	/* initialize the buffer */
	_bt_tapereset(tape);

	return tape;
}

/*
 * destroy the BTTapeBlock structure and its physical tape file.
 */
static void
_bt_tapedestroy(BTTapeBlock *tape)
{
	FileUnlink(tape->bttb_fd);
	pfree((void *) tape);
}

/*
 * flush the tape block to the file, marking End-Of-Run if requested.
 */
static void
_bt_tapewrite(BTTapeBlock *tape, int eor)
{
	tape->bttb_eor = eor;
	FileWrite(tape->bttb_fd, (char *) tape, TAPEBLCKSZ);
	NDirectFileWrite += TAPEBLCKSZ / BLCKSZ;
	_bt_tapereset(tape);
}

/*
 * read a tape block from the file, overwriting the current contents
 * of the buffer.
 *
 * returns:
 * - 0 if there are no more blocks in the tape or in this run (call
 *	 _bt_tapereset to clear the End-Of-Run marker)
 * - 1 if a valid block was read
 */
static int
_bt_taperead(BTTapeBlock *tape)
{
	File		fd;
	int			nread;

	if (tape->bttb_eor)
	{
		return 0;				/* we are already at End-Of-Run */
	}

	/*
	 * we're clobbering the old tape block, but we do need to save the VFD
	 * (the one in the block we're reading is bogus).
	 */
	fd = tape->bttb_fd;
	nread = FileRead(fd, (char *) tape, TAPEBLCKSZ);
	tape->bttb_fd = fd;

	if (nread != TAPEBLCKSZ)
	{
		Assert(nread == 0);		/* we are at EOF */
		return 0;
	}
	Assert(tape->bttb_magic == BTTAPEMAGIC);
	NDirectFileRead += TAPEBLCKSZ / BLCKSZ;
	return 1;
}

/*
 * get the next BTItem from a tape block.
 *
 * returns:
 * - NULL if we have run out of BTItems
 * - a pointer to the BTItemData in the block otherwise
 *
 * side effects:
 * - sets 'pos' to the current position within the block.
 */
static BTItem
_bt_tapenext(BTTapeBlock *tape, char **pos)
{
	Size		itemsz;
	BTItem		bti;

	if (*pos >= tape->bttb_data + tape->bttb_top)
		return (BTItem) NULL;
	bti = (BTItem) *pos;
	itemsz = BTITEMSZ(bti);
	*pos += MAXALIGN(itemsz);
	return bti;
}

/*
 * copy a BTItem into a tape block.
 *
 * assumes that we have already checked to see if the block has enough
 * space for the item.
 *
 * side effects:
 *
 * - advances the 'top' pointer in the tape block header to point to
 * the beginning of free space.
 */
static void
_bt_tapeadd(BTTapeBlock *tape, BTItem item, int itemsz)
{
	memcpy(tape->bttb_data + tape->bttb_top, item, itemsz);
	++tape->bttb_ntup;
	tape->bttb_top += MAXALIGN(itemsz);
}

/*-------------------------------------------------------------------------
 * spool methods
 *-------------------------------------------------------------------------
 */
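
/*
 * a sketch of the expected calling sequence (reconstructed from this
 * interface rather than copied from the caller -- see btbuild in
 * nbtree.c for the real thing):
 *
 *	 void *spool = _bt_spoolinit(index, MAXTAPES, isunique);
 *	 ...
 *	 _bt_spool(index, btitem, spool);		-- once per index tuple
 *	 ...
 *	 _bt_spool(index, (BTItem) NULL, spool);	-- flush the last buffer
 *	 _bt_leafbuild(index, spool);
 *	 _bt_spooldestroy(spool);
 */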

/*
 * create and initialize a spool structure, including the underlying
 * files.
 */
void *
_bt_spoolinit(Relation index, int ntapes, bool isunique)
{
	BTSpool    *btspool = (BTSpool *) palloc(sizeof(BTSpool));
	int			i;

	if (btspool == (BTSpool *) NULL)
		elog(ERROR, "_bt_spoolinit: out of memory");
	MemSet((char *) btspool, 0, sizeof(BTSpool));
	btspool->bts_ntapes = ntapes;
	btspool->bts_tape = 0;
	btspool->isunique = isunique;

	btspool->bts_itape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
	btspool->bts_otape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
	if (btspool->bts_itape == (BTTapeBlock **) NULL ||
		btspool->bts_otape == (BTTapeBlock **) NULL)
		elog(ERROR, "_bt_spoolinit: out of memory");

	for (i = 0; i < ntapes; ++i)
	{
		btspool->bts_itape[i] = _bt_tapecreate();
		btspool->bts_otape[i] = _bt_tapecreate();
	}

	_bt_isortcmpinit(index, btspool);

	return (void *) btspool;
}

/*
 * clean up a spool structure and its substructures.
 */
void
_bt_spooldestroy(void *spool)
{
	BTSpool    *btspool = (BTSpool *) spool;
	int			i;

	for (i = 0; i < btspool->bts_ntapes; ++i)
	{
		_bt_tapedestroy(btspool->bts_otape[i]);
		_bt_tapedestroy(btspool->bts_itape[i]);
	}
	pfree((void *) btspool);
}

/*
 * flush out any dirty output tape blocks
 */
static void
_bt_spoolflush(BTSpool *btspool)
{
	int			i;

	for (i = 0; i < btspool->bts_ntapes; ++i)
	{
		if (!EMPTYTAPE(btspool->bts_otape[i]))
			_bt_tapewrite(btspool->bts_otape[i], 1);
	}
}

/*
 * swap input tapes and output tapes by swapping their file
 * descriptors.  additional preparation for the next merge pass
 * includes rewinding the new input tapes and clearing out the new
 * output tapes.
 */
static void
_bt_spoolswap(BTSpool *btspool)
{
	File		tmpfd;
	BTTapeBlock *itape;
	BTTapeBlock *otape;
	int			i;

	for (i = 0; i < btspool->bts_ntapes; ++i)
	{
		itape = btspool->bts_itape[i];
		otape = btspool->bts_otape[i];

		/*
		 * swap the input and output VFDs.
		 */
		tmpfd = itape->bttb_fd;
		itape->bttb_fd = otape->bttb_fd;
		otape->bttb_fd = tmpfd;

		/*
		 * rewind the new input tape.
		 */
		_bt_taperewind(itape);
		_bt_tapereset(itape);

		/*
		 * clear the new output tape -- it's ok to throw away the old
		 * inputs.
		 */
		_bt_tapeclear(otape);
	}
}

/*-------------------------------------------------------------------------
 * sorting routines
 *-------------------------------------------------------------------------
 */

/*
 * spool 'btitem' into an initial run.	as tape blocks are filled, the
 * block BTItems are qsorted and written into some output tape (it
 * doesn't matter which; we go round-robin for simplicity).  the
 * initial runs are therefore always just one block.
 */
void
_bt_spool(Relation index, BTItem btitem, void *spool)
{
	BTSpool    *btspool = (BTSpool *) spool;
	BTTapeBlock *itape;
	Size		itemsz;

	_bt_isortcmpinit(index, btspool);

	itape = btspool->bts_itape[btspool->bts_tape];
	itemsz = BTITEMSZ(btitem);
	itemsz = MAXALIGN(itemsz);

	/*
	 * if this buffer is too full for this BTItemData, or if we have run
	 * out of BTItems, we need to sort the buffer and write it out.  in
	 * this case, the BTItemData will go into the next tape's buffer.
	 */
	if (btitem == (BTItem) NULL || SPCLEFT(itape) < itemsz)
	{
		BTSortKey  *parray = (BTSortKey *) NULL;
		BTTapeBlock *otape;
		BTItem		bti;
		char	   *pos;
		int			btisz;
		int			it_ntup = itape->bttb_ntup;
		int			i;

		/*
		 * build an array of pointers to the BTItemDatas on the input
		 * block.
		 */
		if (it_ntup > 0)
		{
			parray = (BTSortKey *) palloc(it_ntup * sizeof(BTSortKey));
			pos = itape->bttb_data;
			for (i = 0; i < it_ntup; ++i)
				_bt_setsortkey(index, _bt_tapenext(itape, &pos), &(parray[i]));

			/*
			 * qsort the pointer array.
			 */
			qsort((void *) parray, it_ntup, sizeof(BTSortKey),
				  (int (*) (const void *, const void *)) _bt_isortcmp);
		}

		/*
		 * write the spooled run into the output tape.	we copy the
		 * BTItemDatas in the order dictated by the sorted array of
		 * BTItems, not the original order.
		 *
		 * (since everything was MAXALIGN'd and is all on a single tape
		 * block, everything had *better* still fit on one tape block..)
		 */
		otape = btspool->bts_otape[btspool->bts_tape];
		for (i = 0; i < it_ntup; ++i)
		{
			bti = parray[i].btsk_item;
			btisz = BTITEMSZ(bti);
			btisz = MAXALIGN(btisz);
			_bt_tapeadd(otape, bti, btisz);
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_SPOOL)
			{
				bool		isnull;
				Datum		d = index_getattr(&(bti->bti_itup), 1, index->rd_att,
											  &isnull);

				printf("_bt_spool: inserted <%x> into output tape %d\n",
					   d, btspool->bts_tape);
			}
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_SPOOL */
		}

		/*
		 * the initial runs are always single tape blocks.	flush the
		 * output block, marking End-Of-Run.
		 */
		_bt_tapewrite(otape, 1);

		/*
		 * reset the input buffer for the next run.  we don't have to
		 * write it out or anything -- we only use it to hold the unsorted
		 * BTItemDatas, the output tape contains all the sorted stuff.
		 *
		 * changing bts_tape changes the output tape and input tape; we
		 * change itape for the code below.
		 */
		_bt_tapereset(itape);
		btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes;
		itape = btspool->bts_itape[btspool->bts_tape];

		/*
		 * destroy the pointer array.
		 */
		if (parray != (BTSortKey *) NULL)
		{
			for (i = 0; i < it_ntup; i++)
			{
				if (parray[i].btsk_datum != (Datum *) NULL)
					pfree((void *) (parray[i].btsk_datum));
				if (parray[i].btsk_nulls != (char *) NULL)
					pfree((void *) (parray[i].btsk_nulls));
			}
			pfree((void *) parray);
		}
	}

	/* insert this item into the current buffer */
	if (btitem != (BTItem) NULL)
		_bt_tapeadd(itape, btitem, itemsz);
}

/*
 * allocate a new, clean btree page, not linked to any siblings.
 */
static void
_bt_blnewpage(Relation index, Buffer *buf, Page *page, int flags)
{
	BTPageOpaque opaque;

	*buf = _bt_getbuf(index, P_NEW, BT_WRITE);
#ifdef NOT_USED
	printf("\tblk=%d\n", BufferGetBlockNumber(*buf));
#endif
	*page = BufferGetPage(*buf);
	_bt_pageinit(*page, BufferGetPageSize(*buf));
	opaque = (BTPageOpaque) PageGetSpecialPointer(*page);
	opaque->btpo_prev = opaque->btpo_next = P_NONE;
	opaque->btpo_flags = flags;
}

/*
 * slide an array of ItemIds back one slot (from P_FIRSTKEY to
 * P_HIKEY, overwriting P_HIKEY).  we need to do this when we discover
 * that we have built an ItemId array in what has turned out to be a
 * P_RIGHTMOST page.
 */
static void
_bt_slideleft(Relation index, Buffer buf, Page page)
{
	OffsetNumber off;
	OffsetNumber maxoff;
	ItemId		previi;
	ItemId		thisii;

	if (!PageIsEmpty(page))
	{
		maxoff = PageGetMaxOffsetNumber(page);
		previi = PageGetItemId(page, P_HIKEY);
		for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off))
		{
			thisii = PageGetItemId(page, off);
			*previi = *thisii;
			previi = thisii;
		}
		((PageHeader) page)->pd_lower -= sizeof(ItemIdData);
	}
}

/*
 * allocate and initialize a new BTPageState.  the returned structure
 * is suitable for immediate use by _bt_buildadd.
 */
static void *
_bt_pagestate(Relation index, int flags, int level, bool doupper)
{
	BTPageState *state = (BTPageState *) palloc(sizeof(BTPageState));

	MemSet((char *) state, 0, sizeof(BTPageState));
	_bt_blnewpage(index, &(state->btps_buf), &(state->btps_page), flags);
	state->btps_firstoff = InvalidOffsetNumber;
	state->btps_lastoff = P_HIKEY;
	state->btps_lastbti = (BTItem) NULL;
	state->btps_next = (BTPageState *) NULL;
	state->btps_level = level;
	state->btps_doupper = doupper;

	return (void *) state;
}

/*
 * return a copy of the minimum (P_HIKEY or P_FIRSTKEY) item on
 * 'opage'.  the copy is modified to point to 'opage' (as opposed to
 * the page to which the item used to point, e.g., a heap page if
 * 'opage' is a leaf page).
 */
static BTItem
_bt_minitem(Page opage, BlockNumber oblkno, int atend)
{
	OffsetNumber off;
	BTItem		obti;
	BTItem		nbti;

	off = atend ? P_HIKEY : P_FIRSTKEY;
	obti = (BTItem) PageGetItem(opage, PageGetItemId(opage, off));
	nbti = _bt_formitem(&(obti->bti_itup));
	ItemPointerSet(&(nbti->bti_itup.t_tid), oblkno, P_HIKEY);

	return nbti;
}

/*
 * add an item to a disk page from a merge tape block.
 *
 * we must be careful to observe the following restrictions, placed
 * upon us by the conventions in nbtsearch.c:
 * - rightmost pages start data items at P_HIKEY instead of at
 *	 P_FIRSTKEY.
 * - duplicates cannot be split among pages unless the chain of
 *	 duplicates starts at the first data item.
 *
 * a leaf page being built looks like:
 *
 * +----------------+---------------------------------+
 * | PageHeaderData | linp0 linp1 linp2 ...			  |
 * +-----------+----+---------------------------------+
 * | ... linpN |				  ^ first			  |
 * +-----------+--------------------------------------+
 * |	 ^ last										  |
 * |												  |
 * |			   v last							  |
 * +-------------+------------------------------------+
 * |			 | itemN ...						  |
 * +-------------+------------------+-----------------+
 * |		  ... item3 item2 item1 | "special space" |
 * +--------------------------------+-----------------+
 *						^ first
 *
 * contrast this with the diagram in bufpage.h; note the mismatch
 * between linps and items.  this is because we reserve linp0 as a
 * placeholder for the pointer to the "high key" item; when we have
 * filled up the page, we will set linp0 to point to itemN and clear
 * linpN.
 *
 * 'last' pointers indicate the last offset/item added to the page.
 * 'first' pointers indicate the first offset/item that is part of a
 * chain of duplicates extending from 'first' to 'last'.
 *
 * if all keys are unique, 'first' will always be the same as 'last'.
 */
static BTItem
_bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
{
	BTPageState *state = (BTPageState *) pstate;
	Buffer		nbuf;
	Page		npage;
	BTItem		last_bti;
	OffsetNumber first_off;
	OffsetNumber last_off;
	OffsetNumber off;
	Size		pgspc;
	Size		btisz;

	nbuf = state->btps_buf;
	npage = state->btps_page;
	first_off = state->btps_firstoff;
	last_off = state->btps_lastoff;
	last_bti = state->btps_lastbti;

	pgspc = PageGetFreeSpace(npage);
	btisz = BTITEMSZ(bti);
	btisz = MAXALIGN(btisz);
	if (pgspc < btisz)
	{
		Buffer		obuf = nbuf;
		Page		opage = npage;
		OffsetNumber o,
					n;
		ItemId		ii;
		ItemId		hii;

		_bt_blnewpage(index, &nbuf, &npage, flags);

		/*
		 * if 'last' is part of a chain of duplicates that does not start
		 * at the beginning of the old page, the entire chain is copied to
		 * the new page; we delete all of the duplicates from the old page
		 * except the first, which becomes the high key item of the old
		 * page.
		 *
		 * if the chain starts at the beginning of the page or there is no
		 * chain ('first' == 'last'), we need only copy 'last' to the new
		 * page.  again, 'first' (== 'last') becomes the high key of the
		 * old page.
		 *
		 * note that in either case, we copy at least one item to the new
		 * page, so 'last_bti' will always be valid.  'bti' will never be
		 * the first data item on the new page.
		 */
		if (first_off == P_FIRSTKEY)
		{
			Assert(last_off != P_FIRSTKEY);
			first_off = last_off;
		}
		for (o = first_off, n = P_FIRSTKEY;
			 o <= last_off;
			 o = OffsetNumberNext(o), n = OffsetNumberNext(n))
		{
			ii = PageGetItemId(opage, o);
			if (PageAddItem(npage, PageGetItem(opage, ii),
						  ii->lp_len, n, LP_USED) == InvalidOffsetNumber)
				elog(FATAL, "btree: failed to add item to the page in _bt_sort (1)");
#ifdef NOT_USED
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
			{
				bool		isnull;
				BTItem		tmpbti =
				(BTItem) PageGetItem(npage, PageGetItemId(npage, n));
				Datum		d = index_getattr(&(tmpbti->bti_itup), 1,
											  index->rd_att, &isnull);

				printf("_bt_buildadd: moved <%x> to offset %d at level %d\n",
					   d, n, state->btps_level);
			}
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
#endif
		}

		/*
		 * this loop is backward because PageIndexTupleDelete shuffles the
		 * tuples to fill holes in the page -- by starting at the end and
		 * working back, we won't create holes (and thereby avoid
		 * shuffling).
		 */
		for (o = last_off; o > first_off; o = OffsetNumberPrev(o))
			PageIndexTupleDelete(opage, o);
		hii = PageGetItemId(opage, P_HIKEY);
		ii = PageGetItemId(opage, first_off);
		*hii = *ii;
		ii->lp_flags &= ~LP_USED;
		((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);

		first_off = P_FIRSTKEY;
		last_off = PageGetMaxOffsetNumber(npage);
		last_bti = (BTItem) PageGetItem(npage, PageGetItemId(npage, last_off));

		/*
		 * set the page (side link) pointers.
		 */
		{
			BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
			BTPageOpaque nopaque = (BTPageOpaque) PageGetSpecialPointer(npage);

			oopaque->btpo_next = BufferGetBlockNumber(nbuf);
			nopaque->btpo_prev = BufferGetBlockNumber(obuf);
			nopaque->btpo_next = P_NONE;

			if (_bt_itemcmp(index, _bt_nattr,
			  (BTItem) PageGetItem(opage, PageGetItemId(opage, P_HIKEY)),
			(BTItem) PageGetItem(opage, PageGetItemId(opage, P_FIRSTKEY)),
							BTEqualStrategyNumber))
				oopaque->btpo_flags |= BTP_CHAIN;
		}

		/*
		 * copy the old buffer's minimum key to its parent.  if we don't
		 * have a parent, we have to create one; this adds a new btree
		 * level.
		 */
		if (state->btps_doupper)
		{
			BTItem		nbti;

			if (state->btps_next == (BTPageState *) NULL)
			{
				state->btps_next =
					_bt_pagestate(index, 0, state->btps_level + 1, true);
			}
			nbti = _bt_minitem(opage, BufferGetBlockNumber(obuf), 0);
			_bt_buildadd(index, state->btps_next, nbti, 0);
			pfree((void *) nbti);
		}

		/*
		 * write out the old stuff.  we never want to see it again, so we
		 * can give up our lock (if we had one; BuildingBtree is set, so
		 * we aren't locking).
		 */
		_bt_wrtbuf(index, obuf);
	}

	/*
	 * if this item is different from the last item added, we start a new
	 * chain of duplicates.
	 */
	off = OffsetNumberNext(last_off);
	if (PageAddItem(npage, (Item) bti, btisz, off, LP_USED) == InvalidOffsetNumber)
		elog(FATAL, "btree: failed to add item to the page in _bt_sort (2)");
#ifdef NOT_USED
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
	{
		bool		isnull;
		Datum		d = index_getattr(&(bti->bti_itup), 1, index->rd_att, &isnull);

		printf("_bt_buildadd: inserted <%x> at offset %d at level %d\n",
			   d, off, state->btps_level);
	}
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
#endif
	if (last_bti == (BTItem) NULL)
		first_off = P_FIRSTKEY;
	else if (!_bt_itemcmp(index, _bt_nattr,
						  bti, last_bti, BTEqualStrategyNumber))
		first_off = off;
	last_off = off;
	last_bti = (BTItem) PageGetItem(npage, PageGetItemId(npage, off));

	state->btps_buf = nbuf;
	state->btps_page = npage;
	state->btps_lastbti = last_bti;
	state->btps_lastoff = last_off;
	state->btps_firstoff = first_off;

	return last_bti;
}

static void
_bt_uppershutdown(Relation index, BTPageState *state)
{
	BTPageState *s;
	BlockNumber blkno;
	BTPageOpaque opaque;
	BTItem		bti;

	for (s = state; s != (BTPageState *) NULL; s = s->btps_next)
	{
		blkno = BufferGetBlockNumber(s->btps_buf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(s->btps_page);

		/*
		 * if this is the root, attach it to the metapage.	otherwise,
		 * stick the minimum key of the last page on this level (which has
		 * not been split, or else it wouldn't be the last page) into its
		 * parent.	this may cause the last page of upper levels to split,
		 * but that's not a problem -- we haven't gotten to them yet.
		 */
		if (s->btps_doupper)
		{
			if (s->btps_next == (BTPageState *) NULL)
			{
				opaque->btpo_flags |= BTP_ROOT;
				_bt_metaproot(index, blkno, s->btps_level + 1);
			}
			else
			{
				bti = _bt_minitem(s->btps_page, blkno, 0);
				_bt_buildadd(index, s->btps_next, bti, 0);
				pfree((void *) bti);
			}
		}

		/*
		 * this is the rightmost page, so the ItemId array needs to be
		 * slid back one slot.
		 */
		_bt_slideleft(index, s->btps_buf, s->btps_page);
		_bt_wrtbuf(index, s->btps_buf);
	}
}

/*
 * take the input tapes stored by 'btspool' and perform successive
 * merging passes until at most one run is left in each tape.  at that
 * point, merge the final tape runs into a set of btree leaves.
 *
 * XXX three nested loops?	gross.	cut me up into smaller routines.
 */
static void
_bt_merge(Relation index, BTSpool *btspool)
{
	BTPageState *state;
	BTPriQueue	q;
	BTPriQueueElem e;
	BTSortKey	btsk;
	BTItem		bti;
	BTTapeBlock *itape;
	BTTapeBlock *otape;
	char	   *tapepos[MAXTAPES];
	int			tapedone[MAXTAPES];
	int			t;
	int			goodtapes;
	int			npass;
	int			nruns;
	Size		btisz;
	bool		doleaf = false;

	/*
	 * initialize state needed for the merge into the btree leaf pages.
	 */
	state = (BTPageState *) _bt_pagestate(index, BTP_LEAF, 0, true);

	npass = 0;
	do
	{							/* pass */

		/*
		 * each pass starts by flushing the previous outputs and swapping
		 * inputs and outputs.	flushing sets End-of-Run for any dirty
		 * output tapes.  swapping clears the new output tapes and rewinds
		 * the new input tapes.
		 */
		btspool->bts_tape = btspool->bts_ntapes - 1;
		_bt_spoolflush(btspool);
		_bt_spoolswap(btspool);

		++npass;
		nruns = 0;

		for (;;)
		{						/* run */

			/*
			 * each run starts by selecting a new output tape.	the merged
			 * results of a given run are always sent to this one tape.
			 */
			btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes;
			otape = btspool->bts_otape[btspool->bts_tape];

			/*
			 * initialize the priority queue by loading it with the first
			 * element of the given run in each tape.  since we are
			 * starting a new run, we reset the tape (clearing the
			 * End-Of-Run marker) before reading it.  this means that
			 * _bt_taperead will return 0 only if the tape is actually at
			 * EOF.
			 */
			MemSet((char *) &q, 0, sizeof(BTPriQueue));
			goodtapes = 0;
			for (t = 0; t < btspool->bts_ntapes; ++t)
			{
				itape = btspool->bts_itape[t];
				tapepos[t] = itape->bttb_data;
				tapedone[t] = 0;
				_bt_tapereset(itape);
				do
				{
					if (_bt_taperead(itape) == 0)
						tapedone[t] = 1;
				} while (!tapedone[t] && EMPTYTAPE(itape));
				if (!tapedone[t])
				{
					++goodtapes;
					e.btpqe_tape = t;
					_bt_setsortkey(index, _bt_tapenext(itape, &tapepos[t]),
								   &(e.btpqe_item));
					if (e.btpqe_item.btsk_item != (BTItem) NULL)
						_bt_pqadd(&q, &e);
				}
			}

			/*
			 * if we don't have any tapes with any input (i.e., they are
			 * all at EOF), there is no work to do in this run -- we must
			 * be done with this pass.
			 */
			if (goodtapes == 0)
			{
				break;			/* for */
			}
			++nruns;

			/*
			 * output the smallest element from the queue until there are
			 * no more.
			 */
			while (_bt_pqnext(&q, &e) >= 0)
			{					/* item */

				/*
				 * replace the element taken from priority queue, fetching
				 * a new block if needed.  a tape can run out if it hits
				 * either End-Of-Run or EOF.
				 */
				t = e.btpqe_tape;
				btsk = e.btpqe_item;
				bti = btsk.btsk_item;
				if (bti != (BTItem) NULL)
				{
					btisz = BTITEMSZ(bti);
1224
					btisz = MAXALIGN(btisz);
1225 1226 1227 1228 1229
					if (doleaf)
					{
						_bt_buildadd(index, state, bti, BTP_LEAF);
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
						{
1230 1231
							bool		isnull;
							Datum		d = index_getattr(&(bti->bti_itup), 1,
1232 1233 1234 1235 1236 1237
												 index->rd_att, &isnull);

							printf("_bt_merge: [pass %d run %d] inserted <%x> from tape %d into block %d\n",
								   npass, nruns, d, t,
								   BufferGetBlockNumber(state->btps_buf));
						}
1238
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258
					}
					else
					{
						if (SPCLEFT(otape) < btisz)
						{

							/*
							 * if it's full, write it out and add the item
							 * to the next block.  (since we will be
							 * adding another tuple immediately after
							 * this, we can be sure that there will be at
							 * least one more block in this run and so we
							 * know we do *not* want to set End-Of-Run
							 * here.)
							 */
							_bt_tapewrite(otape, 0);
						}
						_bt_tapeadd(otape, bti, btisz);
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
						{
							bool		isnull;
							Datum		d = index_getattr(&(bti->bti_itup), 1,
												 index->rd_att, &isnull);

							printf("_bt_merge: [pass %d run %d] inserted <%x> from tape %d into output tape %d\n",
								   npass, nruns, d, t,
								   btspool->bts_tape);
						}
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
					}

					if (btsk.btsk_datum != (Datum *) NULL)
						pfree((void *) (btsk.btsk_datum));
					if (btsk.btsk_nulls != (char *) NULL)
						pfree((void *) (btsk.btsk_nulls));

				}
				itape = btspool->bts_itape[t];
				if (!tapedone[t])
				{
1279
					BTItem		newbti = _bt_tapenext(itape, &tapepos[t]);
1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295

					if (newbti == (BTItem) NULL)
					{
						do
						{
							if (_bt_taperead(itape) == 0)
								tapedone[t] = 1;
						} while (!tapedone[t] && EMPTYTAPE(itape));
						if (!tapedone[t])
						{
							tapepos[t] = itape->bttb_data;
							newbti = _bt_tapenext(itape, &tapepos[t]);
						}
					}
					if (newbti != (BTItem) NULL)
					{
						BTPriQueueElem nexte;

						nexte.btpqe_tape = t;
						_bt_setsortkey(index, newbti, &(nexte.btpqe_item));
						_bt_pqadd(&q, &nexte);
					}
				}
			}					/* item */

			/*
			 * that's it for this run.  flush the output tape, marking
			 * End-of-Run.
			 */
			_bt_tapewrite(otape, 1);
		}						/* run */

		/*
		 * we are here because we ran out of input on all of the input
		 * tapes.
		 *
		 * if this pass did not generate more actual output runs than we have
		 * tapes, we know we have at most one run in each tape.  this
		 * means that we are ready to merge into the final btree leaf
		 * pages instead of merging into a tape file.
		 */
		if (nruns <= btspool->bts_ntapes)
			doleaf = true;
	} while (nruns > 0);		/* pass */
1324

1325
	_bt_uppershutdown(index, state);
1326 1327 1328 1329
}


/*
1330 1331 1332 1333 1334 1335 1336 1337 1338
 * given the (appropriately side-linked) leaf pages of a btree,
 * construct the corresponding upper levels.  we do this by inserting
 * minimum keys from each page into parent pages as needed.  the
 * format of the internal pages is otherwise the same as for leaf
 * pages.
 *
 * this routine is not called during conventional bulk-loading (in
 * which case we can just build the upper levels as we create the
 * sorted bottom level).  it is only used for index recycling.
1339
 */
1340
#ifdef NOT_USED
1341
void
1342
_bt_upperbuild(Relation index)
1343
{
1344 1345 1346 1347 1348 1349
	Buffer		rbuf;
	BlockNumber blk;
	Page		rpage;
	BTPageOpaque ropaque;
	BTPageState *state;
	BTItem		nbti;
1350 1351 1352 1353 1354 1355 1356

	/*
	 * find the first leaf block.  while we're at it, clear the BTP_ROOT
	 * flag that we set while building it (so we could find it later).
	 */
	rbuf = _bt_getroot(index, BT_WRITE);
	blk = BufferGetBlockNumber(rbuf);
1357 1358
	rpage = BufferGetPage(rbuf);
	ropaque = (BTPageOpaque) PageGetSpecialPointer(rpage);
1359 1360 1361 1362 1363 1364 1365 1366
	ropaque->btpo_flags &= ~BTP_ROOT;
	_bt_wrtbuf(index, rbuf);

	state = (BTPageState *) _bt_pagestate(index, 0, 0, true);

	/* for each page... */
	do
	{
M
 
Marc G. Fournier 已提交
1367
#ifdef NOT_USED
1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382
		printf("\t\tblk=%d\n", blk);
#endif
		rbuf = _bt_getbuf(index, blk, BT_READ);
		rpage = BufferGetPage(rbuf);
		ropaque = (BTPageOpaque) PageGetSpecialPointer(rpage);

		/* for each item... */
		if (!PageIsEmpty(rpage))
		{

			/*
			 * form a new index tuple corresponding to the minimum key of
			 * the lower page and insert it into a page at this level.
			 */
			nbti = _bt_minitem(rpage, blk, P_RIGHTMOST(ropaque));
1383
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
1384
			{
1385 1386 1387
				bool		isnull;
				Datum		d = index_getattr(&(nbti->bti_itup), 1, index->rd_att,
											  &isnull);
1388 1389 1390 1391

				printf("_bt_upperbuild: inserting <%x> at %d\n",
					   d, state->btps_level);
			}
1392
#endif	 /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
1393 1394 1395 1396 1397 1398 1399 1400
			_bt_buildadd(index, state, nbti, 0);
			pfree((void *) nbti);
		}
		blk = ropaque->btpo_next;
		_bt_relbuf(index, rbuf, BT_READ);
	} while (blk != P_NONE);

	_bt_uppershutdown(index, state);
1401
}
1402

1403
#endif
1404 1405 1406 1407 1408 1409 1410 1411

/*
 * given a spool loading by successive calls to _bt_spool, create an
 * entire btree.
 */
void
_bt_leafbuild(Relation index, void *spool)
{
1412
	_bt_isortcmpinit(index, (BTSpool *) spool);
1413 1414

#ifdef BTREE_BUILD_STATS
1415 1416 1417 1418 1419 1420
	if (ShowExecutorStats)
	{
		fprintf(stderr, "! BtreeBuild (Spool) Stats:\n");
		ShowUsage();
		ResetUsage();
	}
1421 1422
#endif

1423
	_bt_merge(index, (BTSpool *) spool);
1424

1425
}