From 6889303531187f7867a5dfad5f5b5ba103f7cdd6 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Wed, 12 Sep 2007 22:10:26 +0000
Subject: [PATCH] Redefine the lp_flags field of item pointers as having four
 states, rather than two independent bits (one of which was never used in
 heap pages anyway, or at least hadn't been in a very long time).

This gives us flexibility to add the HOT notions of redirected and dead item
pointers without requiring anything so klugy as magic values of lp_off and
lp_len.  The state values are chosen so that for the states currently in use
(pre-HOT) there is no change in the physical representation.
---
 contrib/pageinspect/btreefuncs.c          |   2 +-
 contrib/pageinspect/heapfuncs.c           |  12 +-
 contrib/pgstattuple/pgstattuple.c         |   4 +-
 src/backend/access/gin/ginentrypage.c     |  10 +-
 src/backend/access/gin/ginvacuum.c        |   4 +-
 src/backend/access/gin/ginxlog.c          |  10 +-
 src/backend/access/gist/gist.c            |   4 +-
 src/backend/access/gist/gistget.c         |   8 +-
 src/backend/access/gist/gistutil.c        |   4 +-
 src/backend/access/gist/gistvacuum.c      |   4 +-
 src/backend/access/hash/hash.c            |  10 +-
 src/backend/access/hash/hashinsert.c      |   4 +-
 src/backend/access/hash/hashovfl.c        |   4 +-
 src/backend/access/hash/hashpage.c        |   4 +-
 src/backend/access/heap/heapam.c          |  34 +++---
 src/backend/access/heap/hio.c             |   4 +-
 src/backend/access/heap/rewriteheap.c     |   4 +-
 src/backend/access/nbtree/README          |  18 +--
 src/backend/access/nbtree/nbtinsert.c     |  30 ++---
 src/backend/access/nbtree/nbtpage.c       |   4 +-
 src/backend/access/nbtree/nbtsort.c       |   6 +-
 src/backend/access/nbtree/nbtutils.c      |  12 +-
 src/backend/access/nbtree/nbtxlog.c       |  12 +-
 src/backend/commands/sequence.c           |   6 +-
 src/backend/commands/trigger.c            |   4 +-
 src/backend/commands/vacuum.c             |  14 +--
 src/backend/commands/vacuumlazy.c         |   4 +-
 src/backend/executor/nodeBitmapHeapscan.c |   6 +-
 src/backend/storage/page/bufpage.c        |  71 ++++++-----
 src/include/storage/bufpage.h             |   8 +-
 src/include/storage/itemid.h              | 142 +++++++++++++++++-----
 31 files changed, 278 insertions(+), 185 deletions(-)

diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index e8063d42c6..6251fd5b5f 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -140,7 +140,7 @@ GetBTPageStatistics(BlockNumber blkno, Buffer buffer, BTPageStat * stat) item_size += IndexTupleSize(itup); - if (!ItemIdDeleted(id)) + if (!ItemIdIsDead(id)) stat->live_items++; else stat->dead_items++; diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index abf50de5fa..931c1a5036 100644 --- a/contrib/pageinspect/heapfuncs.c +++ b/contrib/pageinspect/heapfuncs.c @@ -18,7 +18,7 @@ * Copyright (c) 2007, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.1 2007/05/17 19:11:24 momjian Exp $ + * $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.2 2007/09/12 22:10:25 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -156,15 +156,15 @@ heap_page_items(PG_FUNCTION_ARGS) * could be corrupt in many other ways, but at least we won't * crash.
*/ - if ((lp_len >= sizeof(HeapTupleHeader)) && - (lp_offset == MAXALIGN(lp_offset)) && - (lp_offset + lp_len <= raw_page_size) && - ItemIdIsUsed(id)) + if (ItemIdHasStorage(id) && + lp_len >= sizeof(HeapTupleHeader) && + lp_offset == MAXALIGN(lp_offset) && + lp_offset + lp_len <= raw_page_size) { HeapTupleHeader tuphdr; int bits_len; - /* Extract infromation from the tuple header */ + /* Extract information from the tuple header */ tuphdr = (HeapTupleHeader) PageGetItem(page, id); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 9072a37aa9..6e3271ea75 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -1,5 +1,5 @@ /* - * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.28 2007/08/26 23:59:50 tgl Exp $ + * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.29 2007/09/12 22:10:25 tgl Exp $ * * Copyright (c) 2001,2002 Tatsuo Ishii * @@ -477,7 +477,7 @@ pgstat_index_page(pgstattuple_type * stat, Page page, { ItemId itemid = PageGetItemId(page, i); - if (ItemIdDeleted(itemid)) + if (ItemIdIsDead(itemid)) { stat->dead_tuple_count++; stat->dead_tuple_len += ItemIdGetLength(itemid); diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index bf1d850172..a5253da021 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.7 2007/06/04 15:56:28 teodor Exp $ + * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.8 2007/09/12 22:10:25 tgl Exp $ *------------------------------------------------------------------------- */ @@ -359,7 +359,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd *prdata = rdata; data.updateBlkno = entryPreparePage(btree, page, off); - placed = PageAddItem(page, (Item) btree->entry, IndexTupleSize(btree->entry), off, LP_USED); + placed = PageAddItem(page, (Item) btree->entry, IndexTupleSize(btree->entry), off, false); if (placed != off) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(btree->index)); @@ -488,7 +488,7 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR lsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData); } - if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(btree->index)); ptr += MAXALIGN(IndexTupleSize(itup)); @@ -563,11 +563,11 @@ entryFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf) page = BufferGetPage(root); itup = ginPageGetLinkItup(lbuf); - if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index root page"); itup = ginPageGetLinkItup(rbuf); - if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index root page"); } diff --git 
a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 2a636762c0..91f7f3e5f8 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.15 2007/06/05 12:47:49 teodor Exp $ + * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.16 2007/09/12 22:10:25 tgl Exp $ *------------------------------------------------------------------------- */ @@ -544,7 +544,7 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3 itup = GinFormTuple(&gvs->ginstate, value, GinGetPosting(itup), newN); PageIndexTupleDelete(tmppage, i); - if (PageAddItem(tmppage, (Item) itup, IndexTupleSize(itup), i, LP_USED) != i) + if (PageAddItem(tmppage, (Item) itup, IndexTupleSize(itup), i, false) != i) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(gvs->index)); diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index 9d246783d6..db2d6b3933 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.7 2007/06/04 15:56:28 teodor Exp $ + * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.8 2007/09/12 22:10:25 tgl Exp $ *------------------------------------------------------------------------- */ #include "postgres.h" @@ -199,7 +199,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogInsert)); - if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in %u/%u/%u", data->node.spcNode, data->node.dbNode, data->node.relNode); @@ -281,7 +281,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) for (i = 0; i < data->separator; i++) { - if (PageAddItem(lpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(lpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in %u/%u/%u", data->node.spcNode, data->node.dbNode, data->node.relNode); itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup))); @@ -289,7 +289,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) for (i = data->separator; i < data->nitem; i++) { - if (PageAddItem(rpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(rpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in %u/%u/%u", data->node.spcNode, data->node.dbNode, data->node.relNode); itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup))); @@ -375,7 +375,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) for (i = 0; i < data->nitem; i++) { - if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in %u/%u/%u", 
data->node.spcNode, data->node.dbNode, data->node.relNode); itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup))); diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 21d3114676..fce9a94eba 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.145 2007/01/05 22:19:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.146 2007/09/12 22:10:25 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -366,7 +366,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate) data = (char *) (ptr->list); for (i = 0; i < ptr->block.num; i++) { - if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(state->r)); data += IndexTupleSize((IndexTuple) data); } diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index ed839de403..ba7a8ab959 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.66 2007/05/27 03:50:38 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.67 2007/09/12 22:10:25 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -46,7 +46,7 @@ killtuple(Relation r, GISTScanOpaque so, ItemPointer iptr) { /* page unchanged, so all is simple */ offset = ItemPointerGetOffsetNumber(iptr); - PageGetItemId(p, offset)->lp_flags |= LP_DELETE; + ItemIdMarkDead(PageGetItemId(p, offset)); SetBufferCommitInfoNeedsSave(buffer); LockBuffer(buffer, GIST_UNLOCK); break; @@ -61,7 +61,7 @@ killtuple(Relation r, GISTScanOpaque so, ItemPointer iptr) if (ItemPointerEquals(&(ituple->t_tid), iptr)) { /* found */ - PageGetItemId(p, offset)->lp_flags |= LP_DELETE; + ItemIdMarkDead(PageGetItemId(p, offset)); SetBufferCommitInfoNeedsSave(buffer); LockBuffer(buffer, GIST_UNLOCK); if (buffer != so->curbuf) @@ -289,7 +289,7 @@ gistnext(IndexScanDesc scan, ScanDirection dir, ItemPointer tids, ItemPointerSet(&(so->curpos), BufferGetBlockNumber(so->curbuf), n); - if (!(ignore_killed_tuples && ItemIdDeleted(PageGetItemId(p, n)))) + if (!(ignore_killed_tuples && ItemIdIsDead(PageGetItemId(p, n)))) { it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); tids[ntids] = scan->xs_ctup.t_self = it->t_tid; diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index c09e9249ea..6d4f31d53b 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.22 2007/04/09 22:03:57 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.23 2007/09/12 22:10:25 tgl Exp $ *------------------------------------------------------------------------- */ #include "postgres.h" @@ -42,7 +42,7 @@ gistfillbuffer(Relation r, Page page, IndexTuple *itup, 
for (i = 0; i < len; i++) { l = PageAddItem(page, (Item) itup[i], IndexTupleSize(itup[i]), - off, LP_USED); + off, false); if (l == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(r)); diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 1d2ca735a6..0abd0197ad 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.30 2007/05/31 14:03:09 teodor Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.31 2007/09/12 22:10:25 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -201,7 +201,7 @@ vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon, data = (char *) (ptr->list); for (i = 0; i < ptr->block.num; i++) { - if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(gv->index)); data += IndexTupleSize((IndexTuple) data); } diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 57c5422471..d3f54c934b 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.95 2007/05/30 20:11:51 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.96 2007/09/12 22:10:25 tgl Exp $ * * NOTES * This file contains only the public interface routines. @@ -193,11 +193,11 @@ hashgettuple(PG_FUNCTION_ARGS) if (scan->kill_prior_tuple) { /* - * Yes, so mark it by setting the LP_DELETE bit in the item flags. + * Yes, so mark it by setting the LP_DEAD state in the item flags. 
*/ offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos)); page = BufferGetPage(so->hashso_curbuf); - PageGetItemId(page, offnum)->lp_flags |= LP_DELETE; + ItemIdMarkDead(PageGetItemId(page, offnum)); /* * Since this can be redone later if needed, it's treated the same @@ -224,7 +224,7 @@ hashgettuple(PG_FUNCTION_ARGS) { offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos)); page = BufferGetPage(so->hashso_curbuf); - if (!ItemIdDeleted(PageGetItemId(page, offnum))) + if (!ItemIdIsDead(PageGetItemId(page, offnum))) break; res = _hash_next(scan, dir); } @@ -286,7 +286,7 @@ hashgetmulti(PG_FUNCTION_ARGS) offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos)); page = BufferGetPage(so->hashso_curbuf); - if (!ItemIdDeleted(PageGetItemId(page, offnum))) + if (!ItemIdIsDead(PageGetItemId(page, offnum))) break; res = _hash_next(scan, ForwardScanDirection); } diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 65da93640e..c82ad0ad9f 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.45 2007/05/03 16:45:58 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.46 2007/09/12 22:10:25 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -200,7 +200,7 @@ _hash_pgaddtup(Relation rel, page = BufferGetPage(buf); itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page)); - if (PageAddItem(page, (Item) itup, itemsize, itup_off, LP_USED) + if (PageAddItem(page, (Item) itup, itemsize, itup_off, false) == InvalidOffsetNumber) elog(ERROR, "failed to add index item to \"%s\"", RelationGetRelationName(rel)); diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 889bbcdb1a..7e87f308b2 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.58 2007/05/30 20:11:51 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.59 2007/09/12 22:10:25 tgl Exp $ * * NOTES * Overflow pages look like ordinary relation pages. @@ -684,7 +684,7 @@ _hash_squeezebucket(Relation rel, * we have found room so insert on the "write" page. */ woffnum = OffsetNumberNext(PageGetMaxOffsetNumber(wpage)); - if (PageAddItem(wpage, (Item) itup, itemsz, woffnum, LP_USED) + if (PageAddItem(wpage, (Item) itup, itemsz, woffnum, false) == InvalidOffsetNumber) elog(ERROR, "failed to add index item to \"%s\"", RelationGetRelationName(rel)); diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 29d861efb8..4b1450926d 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.68 2007/05/30 20:11:51 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.69 2007/09/12 22:10:25 tgl Exp $ * * NOTES * Postgres hash pages look like ordinary relation pages. 
The opaque @@ -830,7 +830,7 @@ _hash_splitbucket(Relation rel, } noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage)); - if (PageAddItem(npage, (Item) itup, itemsz, noffnum, LP_USED) + if (PageAddItem(npage, (Item) itup, itemsz, noffnum, false) == InvalidOffsetNumber) elog(ERROR, "failed to add index item to \"%s\"", RelationGetRelationName(rel)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index f1ca5249ca..09a70d813f 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.239 2007/09/07 20:59:26 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.240 2007/09/12 22:10:26 tgl Exp $ * * * INTERFACE ROUTINES @@ -198,7 +198,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) lineoff <= lines; lineoff++, lpp++) { - if (ItemIdIsUsed(lpp)) + if (ItemIdIsNormal(lpp)) { HeapTupleData loctup; bool valid; @@ -384,7 +384,7 @@ heapgettup(HeapScanDesc scan, { while (linesleft > 0) { - if (ItemIdIsUsed(lpp)) + if (ItemIdIsNormal(lpp)) { bool valid; @@ -653,7 +653,7 @@ heapgettup_pagemode(HeapScanDesc scan, { lineoff = scan->rs_vistuples[lineindex]; lpp = PageGetItemId(dp, lineoff); - Assert(ItemIdIsUsed(lpp)); + Assert(ItemIdIsNormal(lpp)); tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp); tuple->t_len = ItemIdGetLength(lpp); @@ -1334,7 +1334,7 @@ heap_release_fetch(Relation relation, /* * Must check for deleted tuple. */ - if (!ItemIdIsUsed(lp)) + if (!ItemIdIsNormal(lp)) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); if (keep_buf) @@ -1463,7 +1463,7 @@ heap_get_latest_tid(Relation relation, break; } lp = PageGetItemId(dp, offnum); - if (!ItemIdIsUsed(lp)) + if (!ItemIdIsNormal(lp)) { UnlockReleaseBuffer(buffer); break; @@ -1775,6 +1775,7 @@ heap_delete(Relation relation, ItemPointer tid, dp = (PageHeader) BufferGetPage(buffer); lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid)); + Assert(ItemIdIsNormal(lp)); tp.t_data = (HeapTupleHeader) PageGetItem(dp, lp); tp.t_len = ItemIdGetLength(lp); @@ -2079,6 +2080,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, dp = (PageHeader) BufferGetPage(buffer); lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(otid)); + Assert(ItemIdIsNormal(lp)); oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp); oldtup.t_len = ItemIdGetLength(lp); @@ -2565,7 +2567,7 @@ heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, dp = (PageHeader) BufferGetPage(*buffer); lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid)); - Assert(ItemIdIsUsed(lp)); + Assert(ItemIdIsNormal(lp)); tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); tuple->t_len = ItemIdGetLength(lp); @@ -2958,7 +2960,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple) if (PageGetMaxOffsetNumber(page) >= offnum) lp = PageGetItemId(page, offnum); - if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp)) + if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp)) elog(ERROR, "heap_inplace_update: invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); @@ -3523,7 +3525,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record) { /* unused[] entries are zero-based */ lp = PageGetItemId(page, *unused + 1); - lp->lp_flags &= ~LP_USED; + ItemIdSetUnused(lp); unused++; } } @@ -3643,7 +3645,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record) if (PageGetMaxOffsetNumber(page) >= offnum) lp = PageGetItemId(page, offnum); - if 
(PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp)) + if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp)) elog(PANIC, "heap_delete_redo: invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); @@ -3734,8 +3736,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record) HeapTupleHeaderSetCmin(htup, FirstCommandId); htup->t_ctid = xlrec->target.tid; - offnum = PageAddItem(page, (Item) htup, newlen, offnum, - LP_USED | OverwritePageMode); + offnum = PageAddItem(page, (Item) htup, newlen, offnum, true); if (offnum == InvalidOffsetNumber) elog(PANIC, "heap_insert_redo: failed to add tuple"); PageSetLSN(page, lsn); @@ -3796,7 +3797,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move) if (PageGetMaxOffsetNumber(page) >= offnum) lp = PageGetItemId(page, offnum); - if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp)) + if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp)) elog(PANIC, "heap_update_redo: invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); @@ -3913,8 +3914,7 @@ newsame:; /* Make sure there is no forward chain link in t_ctid */ htup->t_ctid = xlrec->newtid; - offnum = PageAddItem(page, (Item) htup, newlen, offnum, - LP_USED | OverwritePageMode); + offnum = PageAddItem(page, (Item) htup, newlen, offnum, true); if (offnum == InvalidOffsetNumber) elog(PANIC, "heap_update_redo: failed to add tuple"); PageSetLSN(page, lsn); @@ -3955,7 +3955,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record) if (PageGetMaxOffsetNumber(page) >= offnum) lp = PageGetItemId(page, offnum); - if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp)) + if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp)) elog(PANIC, "heap_lock_redo: invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); @@ -4014,7 +4014,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record) if (PageGetMaxOffsetNumber(page) >= offnum) lp = PageGetItemId(page, offnum); - if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp)) + if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp)) elog(PANIC, "heap_inplace_redo: invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 0bb617a8cd..6dbdf13fbe 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.65 2007/02/05 04:22:18 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.66 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -41,7 +41,7 @@ RelationPutHeapTuple(Relation relation, pageHeader = BufferGetPage(buffer); offnum = PageAddItem(pageHeader, (Item) tuple->t_data, - tuple->t_len, InvalidOffsetNumber, LP_USED); + tuple->t_len, InvalidOffsetNumber, false); if (offnum == InvalidOffsetNumber) elog(PANIC, "failed to add tuple to page"); diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index fe8edeedae..60aab58de3 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -96,7 +96,7 @@ * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.5 2007/05/17 15:28:29 alvherre Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.6 2007/09/12 22:10:26 tgl Exp $ * 
*------------------------------------------------------------------------- */ @@ -628,7 +628,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup) /* And now we can insert the tuple into the page */ newoff = PageAddItem(page, (Item) heaptup->t_data, len, - InvalidOffsetNumber, LP_USED); + InvalidOffsetNumber, false); if (newoff == InvalidOffsetNumber) elog(ERROR, "failed to add tuple"); diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index 445d23e5e5..af19525cdc 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -1,4 +1,4 @@ -$PostgreSQL: pgsql/src/backend/access/nbtree/README,v 1.17 2007/01/12 17:04:54 tgl Exp $ +$PostgreSQL: pgsql/src/backend/access/nbtree/README,v 1.18 2007/09/12 22:10:26 tgl Exp $ This directory contains a correct implementation of Lehman and Yao's high-concurrency B-tree management algorithm (P. Lehman and S. Yao, @@ -327,17 +327,17 @@ If a process visits a heap tuple and finds that it's dead and removable (ie, dead to all open transactions, not only that process), then we can return to the index and mark the corresponding index entry "known dead", allowing subsequent index scans to skip visiting the heap tuple. The -"known dead" marking uses the LP_DELETE bit in ItemIds. This is currently -only done in plain indexscans, not bitmap scans, because only plain scans -visit the heap and index "in sync" and so there's not a convenient way -to do it for bitmap scans. +"known dead" marking works by setting the index item's lp_flags state +to LP_DEAD. This is currently only done in plain indexscans, not bitmap +scans, because only plain scans visit the heap and index "in sync" and so +there's not a convenient way to do it for bitmap scans. -Once an index tuple has been marked LP_DELETE it can actually be removed +Once an index tuple has been marked LP_DEAD it can actually be removed from the index immediately; since index scans only stop "between" pages, no scan can lose its place from such a deletion. We separate the steps -because we allow LP_DELETE to be set with only a share lock (it's exactly +because we allow LP_DEAD to be set with only a share lock (it's exactly like a hint bit for a heap tuple), but physically removing tuples requires -exclusive lock. In the current code we try to remove LP_DELETE tuples when +exclusive lock. In the current code we try to remove LP_DEAD tuples when we are otherwise faced with having to split a page to do an insertion (and hence have exclusive lock on it already). @@ -349,7 +349,7 @@ same situation is created by REINDEX, since it doesn't enter dead tuples into the index.) It's sufficient to have an exclusive lock on the index page, not a -super-exclusive lock, to do deletion of LP_DELETE items. It might seem +super-exclusive lock, to do deletion of LP_DEAD items. 
It might seem that this breaks the interlock between VACUUM and indexscans, but that is not so: as long as an indexscanning process has a pin on the page where the index item used to be, VACUUM cannot complete its btbulkdelete scan diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 7d12c9921f..7dbaa2c245 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.158 2007/06/03 22:16:02 petere Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.159 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -221,7 +221,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * we can. We only apply _bt_isequal() when we get to a non-killed * item or the end of the page. */ - if (!ItemIdDeleted(curitemid)) + if (!ItemIdIsDead(curitemid)) { /* * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's @@ -301,7 +301,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, if (HeapTupleSatisfiesVacuum(htup.t_data, RecentGlobalXmin, hbuffer) == HEAPTUPLE_DEAD) { - curitemid->lp_flags |= LP_DELETE; + ItemIdMarkDead(curitemid); opaque->btpo_flags |= BTP_HAS_GARBAGE; /* be sure to mark the proper buffer dirty... */ if (nbuf != InvalidBuffer) @@ -368,7 +368,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * any existing equal keys because of the way _bt_binsrch() works. * * If there's not enough room in the space, we try to make room by - * removing any LP_DELETEd tuples. + * removing any LP_DEAD tuples. * * On entry, *buf and *offsetptr point to the first legal position * where the new tuple could be inserted. The caller should hold an @@ -449,7 +449,7 @@ _bt_findinsertloc(Relation rel, /* * before considering moving right, see if we can obtain enough - * space by erasing LP_DELETE items + * space by erasing LP_DEAD items */ if (P_ISLEAF(lpageop) && P_HAS_GARBAGE(lpageop)) { @@ -840,7 +840,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, itemsz = ItemIdGetLength(itemid); item = (IndexTuple) PageGetItem(origpage, itemid); if (PageAddItem(rightpage, (Item) item, itemsz, rightoff, - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(PANIC, "failed to add hikey to the right sibling"); rightoff = OffsetNumberNext(rightoff); } @@ -865,7 +865,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, item = (IndexTuple) PageGetItem(origpage, itemid); } if (PageAddItem(leftpage, (Item) item, itemsz, leftoff, - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(PANIC, "failed to add hikey to the left sibling"); leftoff = OffsetNumberNext(leftoff); @@ -1699,7 +1699,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) * Note: we *must* insert the two items in item-number order, for the * benefit of _bt_restore_page(). */ - if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, + false) == InvalidOffsetNumber) elog(PANIC, "failed to add leftkey to new root page"); pfree(new_item); @@ -1716,7 +1717,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) /* * insert the right page pointer into the new root page. 
*/ - if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED) == InvalidOffsetNumber) + if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, + false) == InvalidOffsetNumber) elog(PANIC, "failed to add rightkey to new root page"); pfree(new_item); @@ -1803,7 +1805,7 @@ _bt_pgaddtup(Relation rel, } if (PageAddItem(page, (Item) itup, itemsize, itup_off, - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(PANIC, "failed to add item to the %s for \"%s\"", where, RelationGetRelationName(rel)); } @@ -1858,7 +1860,7 @@ _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum, /* * _bt_vacuum_one_page - vacuum just one index page. * - * Try to remove LP_DELETE items from the given page. The passed buffer + * Try to remove LP_DEAD items from the given page. The passed buffer * must be exclusive-locked, but unlike a real VACUUM, we don't need a * super-exclusive "cleanup" lock (see nbtree/README). */ @@ -1875,7 +1877,7 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer) /* * Scan over all items to see which ones need to be deleted - * according to LP_DELETE flags. + * according to LP_DEAD flags. */ minoff = P_FIRSTDATAKEY(opaque); maxoff = PageGetMaxOffsetNumber(page); @@ -1885,7 +1887,7 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer) { ItemId itemId = PageGetItemId(page, offnum); - if (ItemIdDeleted(itemId)) + if (ItemIdIsDead(itemId)) deletable[ndeletable++] = offnum; } @@ -1893,7 +1895,7 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer) _bt_delitems(rel, buffer, deletable, ndeletable); /* - * Note: if we didn't find any LP_DELETE items, then the page's + * Note: if we didn't find any LP_DEAD items, then the page's * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother expending a * separate write to clear it, however. We will clear it when we split * the page. diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index b8de3ba7e3..f62e4b3c5e 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.102 2007/01/05 22:19:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.103 2007/09/12 22:10:26 tgl Exp $ * * NOTES * Postgres btree pages look like ordinary relation pages. The opaque @@ -669,7 +669,7 @@ _bt_delitems(Relation rel, Buffer buf, opaque->btpo_cycleid = 0; /* - * Mark the page as not containing any LP_DELETE items. This is not + * Mark the page as not containing any LP_DEAD items. This is not * certainly true (there might be some that have recently been marked, but * weren't included in our target-item list), but it will almost always be * true and it doesn't seem worth an additional page scan to check it. 
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 148b8e3266..118dc22bb3 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -57,7 +57,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.111 2007/04/08 01:26:27 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.112 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -400,7 +400,7 @@ _bt_sortaddtup(Page page, } if (PageAddItem(page, (Item) itup, itemsize, itup_off, - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to the index page"); } @@ -521,7 +521,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) */ hii = PageGetItemId(opage, P_HIKEY); *hii = *ii; - ii->lp_flags &= ~LP_USED; + ItemIdSetUnused(ii); /* redundant */ ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); /* diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 33c0ee0082..6d85695c3d 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.85 2007/04/09 22:04:01 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.86 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -789,7 +789,7 @@ _bt_checkkeys(IndexScanDesc scan, * However, if this is the last tuple on the page, we should check the * index keys to prevent uselessly advancing to the next page. */ - if (scan->ignore_killed_tuples && ItemIdDeleted(iid)) + if (scan->ignore_killed_tuples && ItemIdIsDead(iid)) { /* return immediately if there are more tuples on the page */ if (ScanDirectionIsForward(dir)) @@ -1088,7 +1088,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, } /* - * _bt_killitems - set LP_DELETE bit for items an indexscan caller has + * _bt_killitems - set LP_DEAD state for items an indexscan caller has * told us were killed * * scan->so contains information about the current page and killed tuples @@ -1096,7 +1096,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * * The caller must have pin on so->currPos.buf, but may or may not have * read-lock, as indicated by haveLock. Note that we assume read-lock - * is sufficient for setting LP_DELETE hint bits. + * is sufficient for setting LP_DEAD status (which is only a hint). * * We match items by heap TID before assuming they are the right ones to * delete. We cope with cases where items have moved right due to insertions. @@ -1149,7 +1149,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock) if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid)) { /* found the item */ - iid->lp_flags |= LP_DELETE; + ItemIdMarkDead(iid); killedsomething = true; break; /* out of inner search loop */ } @@ -1162,7 +1162,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock) * commit-hint-bit status update for heap tuples: we mark the buffer dirty * but don't make a WAL log entry. * - * Whenever we mark anything LP_DELETEd, we also set the page's + * Whenever we mark anything LP_DEAD, we also set the page's * BTP_HAS_GARBAGE flag, which is likewise just a hint. 
*/ if (killedsomething) diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 189695853a..db64422b19 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.44 2007/05/20 21:08:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.45 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -142,7 +142,7 @@ _bt_restore_page(Page page, char *from, int len) itemsz = IndexTupleDSize(itupdata); itemsz = MAXALIGN(itemsz); if (PageAddItem(page, (Item) from, itemsz, - FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) + FirstOffsetNumber, false) == InvalidOffsetNumber) elog(PANIC, "_bt_restore_page: cannot add item to page"); from += itemsz; } @@ -238,7 +238,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, { if (PageAddItem(page, (Item) datapos, datalen, ItemPointerGetOffsetNumber(&(xlrec->target.tid)), - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(PANIC, "btree_insert_redo: failed to add item"); PageSetLSN(page, lsn); @@ -389,7 +389,7 @@ btree_xlog_split(bool onleft, bool isroot, if (onleft) { if (PageAddItem(lpage, newitem, newitemsz, newitemoff, - LP_USED) == InvalidOffsetNumber) + false) == InvalidOffsetNumber) elog(PANIC, "failed to add new item to left page after split"); } @@ -398,7 +398,7 @@ btree_xlog_split(bool onleft, bool isroot, hiItem = PageGetItem(rpage, hiItemId); if (PageAddItem(lpage, hiItem, ItemIdGetLength(hiItemId), - P_HIKEY, LP_USED) == InvalidOffsetNumber) + P_HIKEY, false) == InvalidOffsetNumber) elog(PANIC, "failed to add high key to left page after split"); /* Fix opaque fields */ @@ -483,7 +483,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record) } /* - * Mark the page as not containing any LP_DELETE items --- see comments in + * Mark the page as not containing any LP_DEAD items --- see comments in * _bt_delitems(). 
*/ opaque = (BTPageOpaque) PageGetSpecialPointer(page); diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index cb2a1380ca..73024a7e70 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.144 2007/09/05 18:10:47 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.145 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -936,7 +936,7 @@ read_info(SeqTable elm, Relation rel, Buffer *buf) RelationGetRelationName(rel), sm->magic); lp = PageGetItemId(page, FirstOffsetNumber); - Assert(ItemIdIsUsed(lp)); + Assert(ItemIdIsNormal(lp)); tuple.t_data = (HeapTupleHeader) PageGetItem((Page) page, lp); seq = (Form_pg_sequence) GETSTRUCT(&tuple); @@ -1281,7 +1281,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) itemsz = record->xl_len - sizeof(xl_seq_rec); itemsz = MAXALIGN(itemsz); if (PageAddItem(page, (Item) item, itemsz, - FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) + FirstOffsetNumber, false) == InvalidOffsetNumber) elog(PANIC, "seq_redo: failed to add item to page"); PageSetLSN(page, lsn); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index afcdaa5e91..5d8b6aa168 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.218 2007/08/15 21:39:50 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.219 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1826,7 +1826,7 @@ ltrmark:; dp = (PageHeader) BufferGetPage(buffer); lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid)); - Assert(ItemIdIsUsed(lp)); + Assert(ItemIdIsNormal(lp)); tuple.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); tuple.t_len = ItemIdGetLength(lp); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 89ddeedbea..f9b9423534 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.357 2007/09/08 20:31:14 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.358 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1522,7 +1522,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, /* mark it unused on the temp page */ lpp = PageGetItemId(tempPage, offnum); - lpp->lp_flags &= ~LP_USED; + ItemIdSetUnused(lpp); vacpage->offsets[vacpage->offsets_free++] = offnum; tups_vacuumed += 1; @@ -1558,7 +1558,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, { /* Just use current available space */ vacpage->free = PageGetFreeSpaceWithFillFactor(onerel, page); - /* Need to reap the page if it has ~LP_USED line pointers */ + /* Need to reap the page if it has LP_UNUSED line pointers */ do_reap = (vacpage->offsets_free > 0); } @@ -2582,7 +2582,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, if (HeapTupleHeaderGetXvac(htup) != myXID) elog(ERROR, "invalid XVAC in tuple header"); - itemid->lp_flags &= ~LP_USED; + ItemIdSetUnused(itemid); num_tuples++; } Assert(vacpage->offsets_free == num_tuples); @@ -2714,7 +2714,7 @@ move_chain_tuple(Relation rel, newtup.t_data->t_infomask |= 
HEAP_MOVED_IN; HeapTupleHeaderSetXvac(newtup.t_data, myXID); newoff = PageAddItem(dst_page, (Item) newtup.t_data, tuple_len, - InvalidOffsetNumber, LP_USED); + InvalidOffsetNumber, false); if (newoff == InvalidOffsetNumber) elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain", (unsigned long) tuple_len, dst_vacpage->blkno); @@ -2819,7 +2819,7 @@ move_plain_tuple(Relation rel, /* add tuple to the page */ newoff = PageAddItem(dst_page, (Item) newtup.t_data, tuple_len, - InvalidOffsetNumber, LP_USED); + InvalidOffsetNumber, false); if (newoff == InvalidOffsetNumber) elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)", (unsigned long) tuple_len, @@ -3033,7 +3033,7 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage) for (i = 0; i < vacpage->offsets_free; i++) { itemid = PageGetItemId(page, vacpage->offsets[i]); - itemid->lp_flags &= ~LP_USED; + ItemIdSetUnused(itemid); } uncnt = PageRepairFragmentation(page, unused); diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 0552c3e0ae..3dbd17de58 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -36,7 +36,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.94 2007/09/12 02:05:48 alvherre Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.95 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -642,7 +642,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, break; /* past end of tuples for this block */ toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]); itemid = PageGetItemId(page, toff); - itemid->lp_flags &= ~LP_USED; + ItemIdSetUnused(itemid); } uncnt = PageRepairFragmentation(page, unused); diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index c2ae755e01..f1e30aeb8f 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -21,7 +21,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.18 2007/06/09 18:49:55 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.19 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -183,7 +183,7 @@ BitmapHeapNext(BitmapHeapScanState *node) targoffset = scan->rs_vistuples[scan->rs_cindex]; dp = (Page) BufferGetPage(scan->rs_cbuf); lp = PageGetItemId(dp, targoffset); - Assert(ItemIdIsUsed(lp)); + Assert(ItemIdIsNormal(lp)); scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); scan->rs_ctup.t_len = ItemIdGetLength(lp); @@ -317,7 +317,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) /* * Must check for deleted tuple. */ - if (!ItemIdIsUsed(lp)) + if (!ItemIdIsNormal(lp)) continue; /* diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index b246b0afeb..3ce2f04bd8 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.72 2007/03/02 00:48:44 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.73 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -99,7 +99,10 @@ PageHeaderIsValid(PageHeader page) * Add an item to a page. 
Return value is offset at which it was * inserted, or InvalidOffsetNumber if there's not room to insert. * - * If offsetNumber is valid and <= current max offset in the page, + * If overwrite is true, we just store the item at the specified + * offsetNumber (which must be either a currently-unused item pointer, + * or one past the last existing item). Otherwise, + * if offsetNumber is valid and <= current max offset in the page, * insert item into the array at that position by shuffling ItemId's * down to make room. * If offsetNumber is not valid, then assign one by finding the first @@ -112,7 +115,7 @@ PageAddItem(Page page, Item item, Size size, OffsetNumber offsetNumber, - ItemIdFlags flags) + bool overwrite) { PageHeader phdr = (PageHeader) page; Size alignedSize; @@ -121,9 +124,6 @@ PageAddItem(Page page, ItemId itemId; OffsetNumber limit; bool needshuffle = false; - bool overwritemode = (flags & OverwritePageMode) != 0; - - flags &= ~OverwritePageMode; /* * Be wary about corrupted page pointers @@ -146,12 +146,12 @@ PageAddItem(Page page, if (OffsetNumberIsValid(offsetNumber)) { /* yes, check it */ - if (overwritemode) + if (overwrite) { if (offsetNumber < limit) { itemId = PageGetItemId(phdr, offsetNumber); - if (ItemIdIsUsed(itemId) || ItemIdGetLength(itemId) != 0) + if (ItemIdIsUsed(itemId) || ItemIdHasStorage(itemId)) { elog(WARNING, "will not overwrite a used ItemId"); return InvalidOffsetNumber; @@ -170,11 +170,15 @@ PageAddItem(Page page, /* if no free slot, we'll put it at limit (1st open slot) */ if (PageHasFreeLinePointers(phdr)) { - /* look for "recyclable" (unused & deallocated) ItemId */ + /* + * Look for "recyclable" (unused) ItemId. We check for no + * storage as well, just to be paranoid --- unused items + * should never have storage. 
+ */ for (offsetNumber = 1; offsetNumber < limit; offsetNumber++) { itemId = PageGetItemId(phdr, offsetNumber); - if (!ItemIdIsUsed(itemId) && ItemIdGetLength(itemId) == 0) + if (!ItemIdIsUsed(itemId) && !ItemIdHasStorage(itemId)) break; } if (offsetNumber >= limit) @@ -224,9 +228,7 @@ PageAddItem(Page page, (limit - offsetNumber) * sizeof(ItemIdData)); /* set the item pointer */ - itemId->lp_off = upper; - itemId->lp_len = size; - itemId->lp_flags = flags; + ItemIdSetNormal(itemId, upper, size); /* copy the item's data onto the page */ memcpy((char *) page + upper, item, size); @@ -326,6 +328,7 @@ PageRepairFragmentation(Page page, OffsetNumber *unused) itemidptr; ItemId lp; int nline, + nstorage, nused; int i; Size totallen; @@ -349,38 +352,41 @@ PageRepairFragmentation(Page page, OffsetNumber *unused) pd_lower, pd_upper, pd_special))); nline = PageGetMaxOffsetNumber(page); - nused = 0; + nused = nstorage = 0; for (i = 0; i < nline; i++) { lp = PageGetItemId(page, i + 1); - if (ItemIdDeleted(lp)) /* marked for deletion */ - lp->lp_flags &= ~(LP_USED | LP_DELETE); if (ItemIdIsUsed(lp)) + { nused++; - else if (unused) - unused[i - nused] = (OffsetNumber) i; + if (ItemIdHasStorage(lp)) + nstorage++; + } + else + { + /* Unused entries should have lp_len = 0, but make sure */ + ItemIdSetUnused(lp); + /* Report to caller if asked for */ + if (unused) + unused[i - nused] = (OffsetNumber) i; + } } - if (nused == 0) + if (nstorage == 0) { /* Page is completely empty, so just reset it quickly */ - for (i = 0; i < nline; i++) - { - lp = PageGetItemId(page, i + 1); - lp->lp_len = 0; /* indicate unused & deallocated */ - } ((PageHeader) page)->pd_upper = pd_special; } else - { /* nused != 0 */ + { /* nstorage != 0 */ /* Need to compact the page the hard way */ - itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nused); + itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nstorage); itemidptr = itemidbase; totallen = 0; for (i = 0; i < nline; i++) { lp = PageGetItemId(page, i + 1); - if (ItemIdIsUsed(lp)) + if (ItemIdHasStorage(lp)) { itemidptr->offsetindex = i; itemidptr->itemoff = ItemIdGetOffset(lp); @@ -394,10 +400,6 @@ PageRepairFragmentation(Page page, OffsetNumber *unused) totallen += itemidptr->alignedlen; itemidptr++; } - else - { - lp->lp_len = 0; /* indicate unused & deallocated */ - } } if (totallen > (Size) (pd_special - pd_lower)) @@ -407,13 +409,13 @@ PageRepairFragmentation(Page page, OffsetNumber *unused) (unsigned int) totallen, pd_special - pd_lower))); /* sort itemIdSortData array into decreasing itemoff order */ - qsort((char *) itemidbase, nused, sizeof(itemIdSortData), + qsort((char *) itemidbase, nstorage, sizeof(itemIdSortData), itemoffcompare); /* compactify page */ upper = pd_special; - for (i = 0, itemidptr = itemidbase; i < nused; i++, itemidptr++) + for (i = 0, itemidptr = itemidbase; i < nstorage; i++, itemidptr++) { lp = PageGetItemId(page, itemidptr->offsetindex + 1); upper -= itemidptr->alignedlen; @@ -520,6 +522,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum) offidx = offnum - 1; tup = PageGetItemId(page, offnum); + Assert(ItemIdHasStorage(tup)); size = ItemIdGetLength(tup); offset = ItemIdGetOffset(tup); @@ -577,6 +580,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum) { ItemId ii = PageGetItemId(phdr, i); + Assert(ItemIdHasStorage(ii)); if (ItemIdGetOffset(ii) <= offset) ii->lp_off += size; } @@ -654,6 +658,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) for (offnum = 1; offnum <= nline; offnum++) { lp = 
PageGetItemId(page, offnum); + Assert(ItemIdHasStorage(lp)); size = ItemIdGetLength(lp); offset = ItemIdGetOffset(lp); if (offset < pd_upper || diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h index d38544e3f0..7e6e429108 100644 --- a/src/include/storage/bufpage.h +++ b/src/include/storage/bufpage.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/storage/bufpage.h,v 1.72 2007/03/02 00:48:44 tgl Exp $ + * $PostgreSQL: pgsql/src/include/storage/bufpage.h,v 1.73 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -137,7 +137,7 @@ typedef PageHeaderData *PageHeader; * pd_flags contains the following flag bits. Undefined bits are initialized * to zero and may be used in the future. * - * PD_HAS_FREE_LINES is set if there are any not-LP_USED line pointers before + * PD_HAS_FREE_LINES is set if there are any LP_UNUSED line pointers before * pd_lower. This should be considered a hint rather than the truth, since * changes to it are not WAL-logged. */ @@ -274,7 +274,7 @@ typedef PageHeaderData *PageHeader; #define PageGetItem(page, itemId) \ ( \ AssertMacro(PageIsValid(page)), \ - AssertMacro(ItemIdIsUsed(itemId)), \ + AssertMacro(ItemIdHasStorage(itemId)), \ (Item)(((char *)(page)) + ItemIdGetOffset(itemId)) \ ) @@ -346,7 +346,7 @@ typedef PageHeaderData *PageHeader; extern void PageInit(Page page, Size pageSize, Size specialSize); extern bool PageHeaderIsValid(PageHeader page); extern OffsetNumber PageAddItem(Page page, Item item, Size size, - OffsetNumber offsetNumber, ItemIdFlags flags); + OffsetNumber offsetNumber, bool overwrite); extern Page PageGetTempPage(Page page, Size specialSize); extern void PageRestoreTempPage(Page tempPage, Page oldPage); extern int PageRepairFragmentation(Page page, OffsetNumber *unused); diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h index 63a076550f..2a18f18226 100644 --- a/src/include/storage/itemid.h +++ b/src/include/storage/itemid.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/storage/itemid.h,v 1.27 2007/01/05 22:19:58 momjian Exp $ + * $PostgreSQL: pgsql/src/include/storage/itemid.h,v 1.28 2007/09/12 22:10:26 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -16,41 +16,36 @@ /* * An item pointer (also called line pointer) on a buffer page + * + * In some cases an item pointer is "in use" but does not have any associated + * storage on the page. By convention, lp_len == 0 in every item pointer + * that does not have storage, independently of its lp_flags state. 
*/ typedef struct ItemIdData -{ /* line pointers */ - unsigned lp_off:15, /* offset to start of tuple */ - lp_flags:2, /* flags for tuple */ - lp_len:15; /* length of tuple */ +{ + unsigned lp_off:15, /* offset to tuple (from start of page) */ + lp_flags:2, /* state of item pointer, see below */ + lp_len:15; /* byte length of tuple */ } ItemIdData; typedef ItemIdData *ItemId; /* - * lp_flags contains these flags: - */ -#define LP_USED 0x01 /* this line pointer is being used */ - -#define LP_DELETE 0x02 /* item is to be deleted */ - -#define ItemIdDeleted(itemId) \ - (((itemId)->lp_flags & LP_DELETE) != 0) - -/* - * This bit may be passed to PageAddItem together with - * LP_USED & LP_DELETE bits to specify overwrite mode + * lp_flags has these possible states. An UNUSED line pointer is available + * for immediate re-use, the other states are not. */ -#define OverwritePageMode 0x10 +#define LP_UNUSED 0 /* unused (should always have lp_len=0) */ +#define LP_NORMAL 1 /* used (should always have lp_len>0) */ +#define LP_REDIRECT 2 /* HOT redirect (should have lp_len=0) */ +#define LP_DEAD 3 /* dead, may or may not have storage */ /* - * Item offsets, lengths, and flags are represented by these types when + * Item offsets and lengths are represented by these types when * they're not actually stored in an ItemIdData. */ typedef uint16 ItemOffset; typedef uint16 ItemLength; -typedef bits16 ItemIdFlags; - /* ---------------- * support macros @@ -75,23 +70,114 @@ typedef bits16 ItemIdFlags; #define ItemIdGetFlags(itemId) \ ((itemId)->lp_flags) +/* + * ItemIdGetRedirect + * In a REDIRECT pointer, lp_off holds the link to the next item pointer + */ +#define ItemIdGetRedirect(itemId) \ + ((itemId)->lp_off) + /* * ItemIdIsValid - * True iff disk item identifier is valid. + * True iff item identifier is valid. + * This is a pretty weak test, probably useful only in Asserts. */ #define ItemIdIsValid(itemId) PointerIsValid(itemId) /* * ItemIdIsUsed - * True iff disk item identifier is in use. - * - * Note: - * Assumes disk item identifier is valid. + * True iff item identifier is in use. */ #define ItemIdIsUsed(itemId) \ + ((itemId)->lp_flags != LP_UNUSED) + +/* + * ItemIdIsNormal + * True iff item identifier is in state NORMAL. + */ +#define ItemIdIsNormal(itemId) \ + ((itemId)->lp_flags == LP_NORMAL) + +/* + * ItemIdIsRedirected + * True iff item identifier is in state REDIRECT. + */ +#define ItemIdIsRedirected(itemId) \ + ((itemId)->lp_flags == LP_REDIRECT) + +/* + * ItemIdIsDead + * True iff item identifier is in state DEAD. + */ +#define ItemIdIsDead(itemId) \ + ((itemId)->lp_flags == LP_DEAD) + +/* + * ItemIdHasStorage + * True iff item identifier has associated storage. + */ +#define ItemIdHasStorage(itemId) \ + ((itemId)->lp_len != 0) + +/* + * ItemIdSetUnused + * Set the item identifier to be UNUSED, with no storage. + * Beware of multiple evaluations of itemId! + */ +#define ItemIdSetUnused(itemId) \ +( \ + (itemId)->lp_flags = LP_UNUSED, \ + (itemId)->lp_off = 0, \ + (itemId)->lp_len = 0 \ +) + +/* + * ItemIdSetNormal + * Set the item identifier to be NORMAL, with the specified storage. + * Beware of multiple evaluations of itemId! + */ +#define ItemIdSetNormal(itemId, off, len) \ +( \ + (itemId)->lp_flags = LP_NORMAL, \ + (itemId)->lp_off = (off), \ + (itemId)->lp_len = (len) \ +) + +/* + * ItemIdSetRedirect + * Set the item identifier to be REDIRECT, with the specified link. + * Beware of multiple evaluations of itemId! 
+ */ +#define ItemIdSetRedirect(itemId, link) \ +( \ + (itemId)->lp_flags = LP_REDIRECT, \ + (itemId)->lp_off = (link), \ + (itemId)->lp_len = 0 \ +) + +/* + * ItemIdSetDead + * Set the item identifier to be DEAD, with no storage. + * Beware of multiple evaluations of itemId! + */ +#define ItemIdSetDead(itemId) \ +( \ + (itemId)->lp_flags = LP_DEAD, \ + (itemId)->lp_off = 0, \ + (itemId)->lp_len = 0 \ +) + +/* + * ItemIdMarkDead + * Set the item identifier to be DEAD, keeping its existing storage. + * + * Note: in indexes, this is used as if it were a hint-bit mechanism; + * we trust that multiple processors can do this in parallel and get + * the same result. + */ +#define ItemIdMarkDead(itemId) \ ( \ - AssertMacro(ItemIdIsValid(itemId)), \ - (bool) (((itemId)->lp_flags & LP_USED) != 0) \ + (itemId)->lp_flags = LP_DEAD \ ) #endif /* ITEMID_H */ -- GitLab
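
For reference, here is a minimal, self-contained sketch of the four-state
line pointer this patch introduces in src/include/storage/itemid.h.  It is
meant to be compiled outside the PostgreSQL tree, so it re-declares the
ItemIdData layout, the LP_* state values, and a few of the test macros to
mirror the patched header; main(), and the particular offset/length values
used in it, are purely illustrative and are not part of the patch.

/*
 * Standalone illustration of the new lp_flags states (mirrors the patched
 * itemid.h above; not part of the patch itself).
 */
#include <assert.h>
#include <stdio.h>

typedef struct ItemIdData
{
	unsigned	lp_off:15,		/* offset to tuple (from start of page) */
				lp_flags:2,		/* state of item pointer, see below */
				lp_len:15;		/* byte length of tuple */
} ItemIdData;

#define LP_UNUSED	0			/* unused (should always have lp_len=0) */
#define LP_NORMAL	1			/* used (should always have lp_len>0) */
#define LP_REDIRECT	2			/* HOT redirect (should have lp_len=0) */
#define LP_DEAD		3			/* dead, may or may not have storage */

#define ItemIdIsUsed(itemId)	 ((itemId)->lp_flags != LP_UNUSED)
#define ItemIdIsNormal(itemId)	 ((itemId)->lp_flags == LP_NORMAL)
#define ItemIdIsDead(itemId)	 ((itemId)->lp_flags == LP_DEAD)
#define ItemIdHasStorage(itemId) ((itemId)->lp_len != 0)

int
main(void)
{
	ItemIdData	lp = {0, LP_UNUSED, 0};

	/* What PageAddItem now does through ItemIdSetNormal(itemId, upper, size);
	 * the offset and length here are hypothetical. */
	lp.lp_flags = LP_NORMAL;
	lp.lp_off = 4096;
	lp.lp_len = 128;
	assert(ItemIdIsUsed(&lp) && ItemIdIsNormal(&lp) && ItemIdHasStorage(&lp));

	/* An index scan marking a known-dead entry uses ItemIdMarkDead, which
	 * changes only the state and keeps the existing storage. */
	lp.lp_flags = LP_DEAD;
	assert(ItemIdIsDead(&lp) && ItemIdHasStorage(&lp));

	/* Reclaiming the slot (e.g. in vacuum_page or heap_xlog_clean)
	 * corresponds to ItemIdSetUnused. */
	lp.lp_flags = LP_UNUSED;
	lp.lp_off = 0;
	lp.lp_len = 0;
	assert(!ItemIdIsUsed(&lp) && !ItemIdHasStorage(&lp));

	printf("line pointer state transitions behave as described\n");
	return 0;
}

Note how the chosen state values preserve the old on-disk bits, as the commit
message claims: LP_NORMAL (1) matches the old LP_USED (0x01), LP_DEAD (3)
matches LP_USED|LP_DELETE (0x03), and LP_UNUSED (0) matches cleared flags,
while LP_REDIRECT (2) reuses the previously meaningless
LP_DELETE-without-LP_USED combination for HOT.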