/*-------------------------------------------------------------------------
 *
 * btree.c--
 *	  Implementation of Lehman and Yao's btree management algorithm for
 *	  Postgres.
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.27 1998/07/27 19:37:40 vadim Exp $
 *
 * NOTES
 *	  This file contains only the public interface routines.
 *
 *-------------------------------------------------------------------------
 */

#include <postgres.h>

#include <access/genam.h>
#include <storage/bufpage.h>
#include <storage/bufmgr.h>
#include <access/nbtree.h>
#include <executor/executor.h>
#include <access/heapam.h>
#include <catalog/index.h>
#include <miscadmin.h>

#ifndef HAVE_MEMMOVE
#include <regex/utils.h>
#else
#include <string.h>
#endif

#ifdef BTREE_BUILD_STATS
#include <tcop/tcopprot.h>
extern int	ShowExecutorStats;

#endif

43 44 45
bool		BuildingBtree = false;		/* see comment in btbuild() */
bool		FastBuild = true;	/* use sort/build instead of insertion
								 * build */
46 47

/*
48
 *	btbuild() -- build a new btree index.
49
 *
50 51 52 53
 *		We use a global variable to record the fact that we're creating
 *		a new index.  This is used to avoid high-concurrency locking,
 *		since the index won't be visible until this transaction commits
 *		and since building is guaranteed to be single-threaded.
54 55 56
 */
void
btbuild(Relation heap,
57 58
		Relation index,
		int natts,
B
Bruce Momjian 已提交
59
		AttrNumber *attnum,
60 61
		IndexStrategy istrat,
		uint16 pcount,
62 63 64
		Datum *params,
		FuncIndexInfo *finfo,
		PredInfo *predInfo)
65
{
66 67 68 69 70 71 72 73
	HeapScanDesc hscan;
	Buffer		buffer;
	HeapTuple	htup;
	IndexTuple	itup;
	TupleDesc	htupdesc,
				itupdesc;
	Datum	   *attdata;
	bool	   *nulls;
74
	InsertIndexResult res = 0;
75 76 77 78
	int			nhtups,
				nitups;
	int			i;
	BTItem		btitem;
79

80
#ifndef OMIT_PARTIAL_INDEX
81 82
	ExprContext *econtext = (ExprContext *) NULL;
	TupleTable	tupleTable = (TupleTable) NULL;
83
	TupleTableSlot *slot = (TupleTableSlot *) NULL;
84

85
#endif
86 87 88 89 90 91 92
	Oid			hrelid,
				irelid;
	Node	   *pred,
			   *oldPred;
	void	   *spool = (void *) NULL;
	bool		isunique;
	bool		usefast;
93

94 95 96 97 98
	/* note that this is a new btree */
	BuildingBtree = true;

	pred = predInfo->pred;
	oldPred = predInfo->oldPred;
99 100

	/*
101 102 103 104
	 * bootstrap processing does something strange, so don't use
	 * sort/build for initial catalog indices.	at some point i need to
	 * look harder at this.  (there is some kind of incremental processing
	 * going on there.) -- pma 08/29/95
105
	 */
106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
	usefast = (FastBuild && IsNormalProcessingMode());

#ifdef BTREE_BUILD_STATS
	if (ShowExecutorStats)
		ResetUsage();
#endif

	/* see if index is unique */
	isunique = IndexIsUniqueNoCache(RelationGetRelationId(index));

	/* initialize the btree index metadata page (if this is a new index) */
	if (oldPred == NULL)
		_bt_metapinit(index);

	/* get tuple descriptors for heap and index relations */
	htupdesc = RelationGetTupleDescriptor(heap);
	itupdesc = RelationGetTupleDescriptor(index);

	/* get space for data items that'll appear in the index tuple */
	attdata = (Datum *) palloc(natts * sizeof(Datum));
	nulls = (bool *) palloc(natts * sizeof(bool));

128
	/*
129 130 131 132 133 134
	 * If this is a predicate (partial) index, we will need to evaluate
	 * the predicate using ExecQual, which requires the current tuple to
	 * be in a slot of a TupleTable.  In addition, ExecQual must have an
	 * ExprContext referring to that slot.	Here, we initialize dummy
	 * TupleTable and ExprContext objects for this purpose. --Nels, Feb
	 * '92
135
	 */
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
#ifndef OMIT_PARTIAL_INDEX
	if (pred != NULL || oldPred != NULL)
	{
		tupleTable = ExecCreateTupleTable(1);
		slot = ExecAllocTableSlot(tupleTable);
		econtext = makeNode(ExprContext);
		FillDummyExprContext(econtext, slot, htupdesc, InvalidBuffer);

		/*
		 * we never want to use sort/build if we are extending an existing
		 * partial index -- it works by inserting the newly-qualifying
		 * tuples into the existing index. (sort/build would overwrite the
		 * existing index with one consisting of the newly-qualifying
		 * tuples.)
		 */
		usefast = false;
	}
#endif							/* OMIT_PARTIAL_INDEX */

	/* start a heap scan */
156
	hscan = heap_beginscan(heap, 0, SnapshotNow, 0, (ScanKey) NULL);
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
	htup = heap_getnext(hscan, 0, &buffer);

	/* build the index */
	nhtups = nitups = 0;

	if (usefast)
	{
		spool = _bt_spoolinit(index, 7, isunique);
		res = (InsertIndexResult) NULL;
	}

	for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer))
	{

		nhtups++;

		/*
		 * If oldPred != NULL, this is an EXTEND INDEX command, so skip
		 * this tuple if it was already in the existing partial index
		 */
		if (oldPred != NULL)
		{
179 180
#ifndef OMIT_PARTIAL_INDEX

181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
			/* SetSlotContents(slot, htup); */
			slot->val = htup;
			if (ExecQual((List *) oldPred, econtext) == true)
			{
				nitups++;
				continue;
			}
#endif							/* OMIT_PARTIAL_INDEX */
		}

		/*
		 * Skip this tuple if it doesn't satisfy the partial-index
		 * predicate
		 */
		if (pred != NULL)
		{
#ifndef OMIT_PARTIAL_INDEX
			/* SetSlotContents(slot, htup); */
			slot->val = htup;
			if (ExecQual((List *) pred, econtext) == false)
				continue;
#endif							/* OMIT_PARTIAL_INDEX */
		}

205
		nitups++;
206 207 208 209 210 211 212 213

		/*
		 * For the current heap tuple, extract all the attributes we use
		 * in this index, and note which are null.
		 */

		for (i = 1; i <= natts; i++)
		{
214 215
			int			attoff;
			bool		attnull;
216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277

			/*
			 * Offsets are from the start of the tuple, and are
			 * zero-based; indices are one-based.  The next call returns i
			 * - 1.  That's data hiding for you.
			 */

			attoff = AttrNumberGetAttrOffset(i);
			attdata[attoff] = GetIndexValue(htup,
											htupdesc,
											attoff,
											attnum,
											finfo,
											&attnull,
											buffer);
			nulls[attoff] = (attnull ? 'n' : ' ');
		}

		/* form an index tuple and point it at the heap tuple */
		itup = index_formtuple(itupdesc, attdata, nulls);

		/*
		 * If the single index key is null, we don't insert it into the
		 * index.  Btrees support scans on <, <=, =, >=, and >. Relational
		 * algebra says that A op B (where op is one of the operators
		 * above) returns null if either A or B is null.  This means that
		 * no qualification used in an index scan could ever return true
		 * on a null attribute.  It also means that indices can't be used
		 * by ISNULL or NOTNULL scans, but that's an artifact of the
		 * strategy map architecture chosen in 1986, not of the way nulls
		 * are handled here.
		 */

		/*
		 * New comments: NULLs handling. While we can't do NULL
		 * comparison, we can follow simple rule for ordering items on
		 * btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
		 * Sure, it's just rule for placing/finding items and no more -
		 * keytest'll return FALSE for a = 5 for items having 'a' isNULL.
		 * Look at _bt_skeycmp, _bt_compare and _bt_itemcmp for how it
		 * works.				 - vadim 03/23/97
		 *
		 * if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
		 */

		itup->t_tid = htup->t_ctid;
		btitem = _bt_formitem(itup);

		/*
		 * if we are doing bottom-up btree build, we insert the index into
		 * a spool page for subsequent processing.	otherwise, we insert
		 * into the btree.
		 */
		if (usefast)
			_bt_spool(index, btitem, spool);
		else
			res = _bt_doinsert(index, btitem, isunique, heap);

		pfree(btitem);
		pfree(itup);
		if (res)
			pfree(res);
278
	}
279 280 281 282 283 284

	/* okay, all heap tuples are indexed */
	heap_endscan(hscan);

	if (pred != NULL || oldPred != NULL)
	{
285
#ifndef OMIT_PARTIAL_INDEX
286 287 288
		ExecDestroyTupleTable(tupleTable, true);
		pfree(econtext);
#endif							/* OMIT_PARTIAL_INDEX */
289
	}
290

291
	/*
292 293 294 295
	 * if we are doing bottom-up btree build, we now have a bunch of
	 * sorted runs in the spool pages.	finish the build by (1) merging
	 * the runs, (2) inserting the sorted tuples into btree pages and (3)
	 * building the upper levels.
296
	 */
297 298 299 300 301
	if (usefast)
	{
		_bt_spool(index, (BTItem) NULL, spool); /* flush the spool */
		_bt_leafbuild(index, spool);
		_bt_spooldestroy(spool);
302
	}
303 304 305 306 307 308 309

#ifdef BTREE_BUILD_STATS
	if (ShowExecutorStats)
	{
		fprintf(stderr, "! BtreeBuild Stats:\n");
		ShowUsage();
		ResetUsage();
310
	}
311
#endif
312 313

	/*
314 315 316 317 318 319 320
	 * Since we just counted the tuples in the heap, we update its stats
	 * in pg_class to guarantee that the planner takes advantage of the
	 * index we just created. Finally, only update statistics during
	 * normal index definitions, not for indices on system catalogs
	 * created during bootstrap processing.  We must close the relations
	 * before updatings statistics to guarantee that the relcache entries
	 * are flushed when we increment the command counter in UpdateStats().
321
	 */
322 323 324 325 326 327 328 329 330 331 332 333 334 335
	if (IsNormalProcessingMode())
	{
		hrelid = heap->rd_id;
		irelid = index->rd_id;
		heap_close(heap);
		index_close(index);
		UpdateStats(hrelid, nhtups, true);
		UpdateStats(irelid, nitups, false);
		if (oldPred != NULL)
		{
			if (nitups == nhtups)
				pred = NULL;
			UpdateIndexPredicate(irelid, oldPred, pred);
		}
336 337
	}

338 339
	pfree(nulls);
	pfree(attdata);
340

341 342
	/* all done */
	BuildingBtree = false;
343 344 345
}

/*
346
 *	btinsert() -- insert an index tuple into a btree.
347
 *
348 349 350
 *		Descend the tree recursively, find the appropriate location for our
 *		new tuple, put it there, set its unique OID as appropriate, and
 *		return an InsertIndexResult to the caller.
351 352
 */
InsertIndexResult
353
btinsert(Relation rel, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation heapRel)
354
{
355 356
	BTItem		btitem;
	IndexTuple	itup;
357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
	InsertIndexResult res;

	/* generate an index tuple */
	itup = index_formtuple(RelationGetTupleDescriptor(rel), datum, nulls);
	itup->t_tid = *ht_ctid;

	/*
	 * See comments in btbuild.
	 *
	 * if (itup->t_info & INDEX_NULL_MASK) return ((InsertIndexResult) NULL);
	 */

	btitem = _bt_formitem(itup);

	res = _bt_doinsert(rel, btitem,
					 IndexIsUnique(RelationGetRelationId(rel)), heapRel);

	pfree(btitem);
	pfree(itup);

	/* adjust any active scans that will be affected by this insertion */
	_bt_adjscans(rel, &(res->pointerData), BT_INSERT);

	return (res);
381 382 383
}

/*
384
 *	btgettuple() -- Get the next tuple in the scan.
385
 */
386
char *
387 388
btgettuple(IndexScanDesc scan, ScanDirection dir)
{
389 390 391 392 393 394 395 396 397 398 399 400 401 402
	RetrieveIndexResult res;

	/*
	 * If we've already initialized this scan, we can just advance it in
	 * the appropriate direction.  If we haven't done so yet, we call a
	 * routine to get the first item in the scan.
	 */

	if (ItemPointerIsValid(&(scan->currentItemData)))
		res = _bt_next(scan, dir);
	else
		res = _bt_first(scan, dir);

	return ((char *) res);
403 404 405
}

/*
406
 *	btbeginscan() -- start a scan on a btree index
407
 */
408
char *
409 410
btbeginscan(Relation rel, bool fromEnd, uint16 keysz, ScanKey scankey)
{
411
	IndexScanDesc scan;
412 413 414 415 416 417 418 419

	/* get the scan */
	scan = RelationGetIndexScan(rel, fromEnd, keysz, scankey);

	/* register scan in case we change pages it's using */
	_bt_regscan(scan);

	return ((char *) scan);
420 421 422
}

/*
 *	btrescan() -- rescan an index relation
 */
void
btrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey)
{
	ItemPointer iptr;
	BTScanOpaque so;

	so = (BTScanOpaque) scan->opaque;

	/*
	 * Allocate the opaque state BEFORE touching so->btso_curbuf below.
	 * The original code dereferenced 'so' first and only tested for NULL
	 * afterwards, which would crash if btbeginscan handed us a scan whose
	 * currentItemData happened to look valid.
	 */
	if (so == NULL)				/* if called from btbeginscan */
	{
		so = (BTScanOpaque) palloc(sizeof(BTScanOpaqueData));
		so->btso_curbuf = so->btso_mrkbuf = InvalidBuffer;
		so->keyData = (ScanKey) NULL;
		if (scan->numberOfKeys > 0)
			so->keyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData));
		scan->opaque = so;
		scan->flags = 0x0;
	}

	/* we hold a read lock on the current page in the scan */
	if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
	{
		_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
		so->btso_curbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	/* and we hold a read lock on the last marked item in the scan */
	if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
	{
		_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
		so->btso_mrkbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	/*
	 * Reset the scan keys. Note that keys ordering stuff moved to
	 * _bt_first.	   - vadim 05/05/97
	 */
	so->numberOfKeys = scan->numberOfKeys;
	if (scan->numberOfKeys > 0)
	{
		memmove(scan->keyData,
				scankey,
				scan->numberOfKeys * sizeof(ScanKeyData));
		memmove(so->keyData,
				scankey,
				so->numberOfKeys * sizeof(ScanKeyData));
	}
}

/*
 *	btmovescan() -- change the first scan key's argument
 */
void
btmovescan(IndexScanDesc scan, Datum v)
{
	ItemPointer iptr;
	BTScanOpaque so;

	so = (BTScanOpaque) scan->opaque;

	/* release any locks we still hold */
	if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
	{
		_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
		so->btso_curbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

/*	  scan->keyData[0].sk_argument = v; */
	so->keyData[0].sk_argument = v;
}

/*
 *	btendscan() -- close down a scan
 */
void
btendscan(IndexScanDesc scan)
{
	ItemPointer iptr;
	BTScanOpaque so;

	so = (BTScanOpaque) scan->opaque;

	/* release any locks we still hold */
	if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
	{
		if (BufferIsValid(so->btso_curbuf))
			_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
		so->btso_curbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
	{
		if (BufferIsValid(so->btso_mrkbuf))
			_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
		so->btso_mrkbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	/* free the opaque scan state and unregister the scan */
	if (so->keyData != (ScanKey) NULL)
		pfree(so->keyData);
	pfree(so);

	_bt_dropscan(scan);
}

/*
 *	btmarkpos() -- save current scan position
 */
void
btmarkpos(IndexScanDesc scan)
{
	ItemPointer iptr;
	BTScanOpaque so;

	so = (BTScanOpaque) scan->opaque;

	/* release lock on old marked data, if any */
	if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
	{
		_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
		so->btso_mrkbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	/* bump lock on currentItemData and copy to currentMarkData */
	if (ItemPointerIsValid(&(scan->currentItemData)))
	{
		so->btso_mrkbuf = _bt_getbuf(scan->relation,
								   BufferGetBlockNumber(so->btso_curbuf),
									 BT_READ);
		scan->currentMarkData = scan->currentItemData;
	}
}

/*
 *	btrestrpos() -- restore scan to last saved position
 */
void
btrestrpos(IndexScanDesc scan)
{
	ItemPointer iptr;
	BTScanOpaque so;

	so = (BTScanOpaque) scan->opaque;

	/* release lock on current data, if any */
	if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
	{
		_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
		so->btso_curbuf = InvalidBuffer;
		ItemPointerSetInvalid(iptr);
	}

	/* bump lock on currentMarkData and copy to currentItemData */
	if (ItemPointerIsValid(&(scan->currentMarkData)))
	{
		so->btso_curbuf = _bt_getbuf(scan->relation,
								   BufferGetBlockNumber(so->btso_mrkbuf),
									 BT_READ);

		scan->currentItemData = scan->currentMarkData;
	}
}

/* stubs */

/*
 *	btdelete() -- delete an index tuple from a btree.
 */
void
btdelete(Relation rel, ItemPointer tid)
{
	/* adjust any active scans that will be affected by this deletion */
	_bt_adjscans(rel, tid, BT_DELETE);

	/* delete the data from the page */
	_bt_pagedel(rel, tid);
}