diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index ee996b5660..9f6659ae10 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -934,7 +934,7 @@ _bt_getbuf(Relation rel, Relation heaprel, BlockNumber blkno, int access)
 					return buf;
 				}
 
-				if (BTPageIsRecyclable(page))
+				if (BTPageIsRecyclable(page, heaprel))
 				{
 					/*
 					 * If we are generating WAL for Hot Standby then create a
@@ -2961,6 +2961,8 @@ void
 _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
 {
 	IndexBulkDeleteResult *stats = vstate->stats;
+	IndexVacuumInfo *info = vstate->info;
+	Relation	heaprel = info->heaprel;
 
 	Assert(stats->pages_newly_deleted >= vstate->npendingpages);
 
@@ -2993,7 +2995,7 @@ _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
 	 * essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
 	 * that it is now safe to recycle newly deleted pages without this step.
 	 */
-	GetOldestNonRemovableTransactionId(NULL);
+	GetOldestNonRemovableTransactionId(heaprel);
 
 	for (int i = 0; i < vstate->npendingpages; i++)
 	{
@@ -3008,7 +3010,7 @@ _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
 		 * must be non-recyclable too, since _bt_pendingfsm_add() adds pages
 		 * to the array in safexid order.
 		 */
-		if (!GlobalVisCheckRemovableFullXid(NULL, safexid))
+		if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
			break;
 
 		RecordFreeIndexPage(rel, target);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 97a39b0f65..409a2c1210 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -1039,6 +1039,7 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
 	IndexBulkDeleteCallback callback = vstate->callback;
 	void	   *callback_state = vstate->callback_state;
 	Relation	rel = info->index;
+	Relation	heaprel = info->heaprel;
 	bool		attempt_pagedel;
 	BlockNumber blkno,
 				backtrack_to;
@@ -1124,7 +1125,7 @@ backtrack:
 		}
 	}
 
-	if (!opaque || BTPageIsRecyclable(page))
+	if (!opaque || BTPageIsRecyclable(page, heaprel))
 	{
 		/* Okay to recycle this page (which could be leaf or internal) */
 		RecordFreeIndexPage(rel, blkno);
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 3cff71e720..5a7e55441b 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -506,8 +506,7 @@ vacuumRedirectAndPlaceholder(Relation index, Relation heaprel, Buffer buffer)
 	xlrec.nToPlaceholder = 0;
 	xlrec.snapshotConflictHorizon = InvalidTransactionId;
 
-	/* XXX: providing heap relation would allow more pruning */
-	vistest = GlobalVisTestFor(NULL);
+	vistest = GlobalVisTestFor(heaprel);
 
 	START_CRIT_SECTION();
 
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 6dee307042..953bf6586b 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -288,7 +288,7 @@ BTPageGetDeleteXid(Page page)
  * well need special handling for new pages anyway.
  */
 static inline bool
-BTPageIsRecyclable(Page page)
+BTPageIsRecyclable(Page page, Relation heaprel)
 {
 	BTPageOpaque opaque;
 
@@ -307,12 +307,8 @@ BTPageIsRecyclable(Page page)
 		 * For that check if the deletion XID could still be visible to
 		 * anyone. If not, then no scan that's still in progress could have
		 * seen its downlink, and we can recycle it.
-		 *
-		 * XXX: If we had the heap relation we could be more aggressive about
-		 * recycling deleted pages in non-catalog relations. For now we just
-		 * pass NULL. That is at least simple and consistent.
		 */
-		return GlobalVisCheckRemovableFullXid(NULL, BTPageGetDeleteXid(page));
+		return GlobalVisCheckRemovableFullXid(heaprel, BTPageGetDeleteXid(page));
	}
 
 	return false;
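For reference, a minimal sketch (not part of the patch) of the call pattern it establishes during nbtree VACUUM: the heap relation is pulled out of IndexVacuumInfo and threaded through to the visibility check instead of passing NULL. The field and function names (info->heaprel, BTPageIsRecyclable, GlobalVisCheckRemovableFullXid, RecordFreeIndexPage) are the ones touched by the patch; the helper function itself is hypothetical.

/*
 * Hypothetical illustration of how the heap relation reaches the
 * recyclability check after this patch; mirrors btvacuumpage().
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/nbtree.h"
#include "storage/indexfsm.h"

static void
recycle_deleted_page_sketch(IndexVacuumInfo *info, BlockNumber blkno, Page page)
{
	Relation	rel = info->index;		/* the nbtree index being vacuumed */
	Relation	heaprel = info->heaprel;	/* heap relation, newly threaded through */

	/*
	 * With heaprel available, GlobalVisCheckRemovableFullXid() inside
	 * BTPageIsRecyclable() can use a horizon appropriate to that relation
	 * rather than the most conservative one implied by NULL, which is what
	 * the removed XXX comments describe as allowing more aggressive
	 * recycling for non-catalog relations.
	 */
	if (BTPageIsRecyclable(page, heaprel))
		RecordFreeIndexPage(rel, blkno);
}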