From 4fbcb6b64c99649f76356b27c4ac39b735307585 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Fri, 8 Mar 2024 16:45:57 -0500
Subject: [PATCH v2 07/17] Execute freezing in heap_page_prune()

As a step toward combining the prune and freeze WAL records, execute
freezing in heap_page_prune(), which is renamed to
heap_page_prune_and_freeze().  The logic that decides whether to execute
the prepared freeze plans is moved over from lazy_scan_prune() with
little modification.  PruneResult is renamed to PruneFreezeResult and
now also reports the new relfrozenxid and relminmxid values that the
caller should use for relfrozenxid advancement.
---
 src/backend/access/heap/heapam_handler.c |   2 +-
 src/backend/access/heap/pruneheap.c      | 151 +++++++++++++++++------
 src/backend/access/heap/vacuumlazy.c     | 129 ++++++-------------
 src/backend/storage/ipc/procarray.c      |   6 +-
 src/include/access/heapam.h              |  41 +++---
 src/tools/pgindent/typedefs.list         |   2 +-
 6 files changed, 180 insertions(+), 151 deletions(-)
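
Condensed for reviewers (not itself part of the diff): after pruning,
heap_page_prune_and_freeze() decides whether to execute the prepared
freeze plans and hands relfrozenxid/relminmxid values back to the caller
roughly as follows.  All names below come from the patch; the excerpt is
a simplification and is not compilable on its own:

    /* Freeze if required, or opportunistically when pruning emitted an
     * FPI and freezing lets the page be marked all-frozen afterwards. */
    do_freeze = pagefrz &&
        (pagefrz->freeze_required ||
         (presult->all_visible_except_removable && presult->all_frozen &&
          presult->nfrozen > 0 && fpi_before != pgWalUsage.wal_fpi));

    if (do_freeze)
        heap_freeze_execute_prepared(relation, buffer,
                                     heap_frz_conflict_horizon(presult, pagefrz),
                                     frozen, presult->nfrozen);
    else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
    {
        /* We are not freezing; page can't be reported as all-frozen */
        presult->all_frozen = false;
        presult->nfrozen = 0;
    }

    if (pagefrz)
    {
        /* Report candidate relfrozenxid/relminmxid back to the caller */
        if (presult->all_frozen || presult->nfrozen > 0)
        {
            presult->new_relfrozenxid = pagefrz->FreezePageRelfrozenXid;
            presult->new_relminmxid = pagefrz->FreezePageRelminMxid;
        }
        else
        {
            presult->new_relfrozenxid = pagefrz->NoFreezePageRelfrozenXid;
            presult->new_relminmxid = pagefrz->NoFreezePageRelminMxid;
        }
    }

lazy_scan_prune() then adopts presult.new_relfrozenxid and
presult.new_relminmxid directly, and only increments frozen_pages when
presult.nfrozen > 0.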

diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 680a50bf8b1..5e522f5b0ba 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -1046,7 +1046,7 @@ heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
 		 * We ignore unused and redirect line pointers.  DEAD line pointers
 		 * should be counted as dead, because we need vacuum to run to get rid
 		 * of them.  Note that this rule agrees with the way that
-		 * heap_page_prune() counts things.
+		 * heap_page_prune_and_freeze() counts things.
 		 */
 		if (!ItemIdIsNormal(itemid))
 		{
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 44a5c0a917b..9c709315192 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -17,16 +17,18 @@
 #include "access/heapam.h"
 #include "access/heapam_xlog.h"
 #include "access/htup_details.h"
+#include "access/multixact.h"
 #include "access/transam.h"
 #include "access/xlog.h"
 #include "access/xloginsert.h"
+#include "executor/instrument.h"
 #include "miscadmin.h"
 #include "pgstat.h"
 #include "storage/bufmgr.h"
 #include "utils/snapmgr.h"
 #include "utils/rel.h"
 
-/* Working data for heap_page_prune and subroutines */
+/* Working data for heap_page_prune_and_freeze() and subroutines */
 typedef struct
 {
 	Relation	rel;
@@ -61,17 +63,18 @@ static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
 											   Buffer buffer);
 static int	heap_prune_chain(Buffer buffer,
 							 OffsetNumber rootoffnum,
-							 PruneState *prstate, PruneResult *presult);
+							 PruneState *prstate, PruneFreezeResult *presult);
 
 static void prune_prepare_freeze_tuple(Page page, OffsetNumber offnum,
-									   HeapPageFreeze *pagefrz, PruneResult *presult);
+									   HeapPageFreeze *pagefrz, HeapTupleFreeze *frozen,
+									   PruneFreezeResult *presult);
 static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
 static void heap_prune_record_redirect(PruneState *prstate,
 									   OffsetNumber offnum, OffsetNumber rdoffnum);
 static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
-								   PruneResult *presult);
+								   PruneFreezeResult *presult);
 static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
-											 PruneResult *presult);
+											 PruneFreezeResult *presult);
 static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
 static void page_verify_redirects(Page page);
 
@@ -151,15 +154,15 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 		 */
 		if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
 		{
-			PruneResult presult;
+			PruneFreezeResult presult;
 
 			/*
 			 * For now, pass mark_unused_now as false regardless of whether or
 			 * not the relation has indexes, since we cannot safely determine
 			 * that during on-access pruning with the current implementation.
 			 */
-			heap_page_prune(relation, buffer, vistest, false, NULL,
-							&presult, NULL);
+			heap_page_prune_and_freeze(relation, buffer, vistest, false, NULL,
+									   &presult, NULL);
 
 			/*
 			 * Report the number of tuples reclaimed to pgstats.  This is
@@ -193,7 +196,12 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 
 
 /*
- * Prune and repair fragmentation in the specified page.
+ * Prune and repair fragmentation and potentially freeze tuples on the
+ * specified page.
+ *
+ * If the caller provides pagefrz, we also freeze tuples: when freezing is
+ * required to advance relfrozenxid/relminmxid, or opportunistically when
+ * pruning emitted an FPI and freezing lets the page be marked all-frozen.
  *
  * Caller must have pin and buffer cleanup lock on the page.  Note that we
  * don't update the FSM information for page on caller's behalf.  Caller might
@@ -207,23 +215,24 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * mark_unused_now indicates whether or not dead items can be set LP_UNUSED during
  * pruning.
  *
- * pagefrz contains both input and output parameters used if the caller is
- * interested in potentially freezing tuples on the page.
+ * pagefrz is an input parameter containing visibility cutoff information and
+ * the candidate new relfrozenxid and relminmxid values.  It is only provided
+ * if the caller is interested in freezing tuples on the page.
  *
  * off_loc is the offset location required by the caller to use in error
  * callback.
  *
  * presult contains output parameters needed by callers such as the number of
  * tuples removed and the number of line pointers newly marked LP_DEAD.
- * heap_page_prune() is responsible for initializing it.
+ * heap_page_prune_and_freeze() is responsible for initializing it.
  */
 void
-heap_page_prune(Relation relation, Buffer buffer,
-				GlobalVisState *vistest,
-				bool mark_unused_now,
-				HeapPageFreeze *pagefrz,
-				PruneResult *presult,
-				OffsetNumber *off_loc)
+heap_page_prune_and_freeze(Relation relation, Buffer buffer,
+						   GlobalVisState *vistest,
+						   bool mark_unused_now,
+						   HeapPageFreeze *pagefrz,
+						   PruneFreezeResult *presult,
+						   OffsetNumber *off_loc)
 {
 	Page		page = BufferGetPage(buffer);
 	BlockNumber blockno = BufferGetBlockNumber(buffer);
@@ -231,6 +240,14 @@ heap_page_prune(Relation relation, Buffer buffer,
 				maxoff;
 	PruneState	prstate;
 	HeapTupleData tup;
+	bool		do_freeze;
+	int64		fpi_before = pgWalUsage.wal_fpi;
+	TransactionId frz_conflict_horizon = InvalidTransactionId;
+
+	/*
+	 * One entry for every tuple that we may freeze.
+	 */
+	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
 
 	/*
 	 * Our strategy is to scan the page and make lists of items to change,
@@ -267,6 +284,10 @@ heap_page_prune(Relation relation, Buffer buffer,
 	/* for recovery conflicts */
 	presult->frz_conflict_horizon = InvalidTransactionId;
 
+	/* For advancing relfrozenxid and relminmxid */
+	presult->new_relfrozenxid = InvalidTransactionId;
+	presult->new_relminmxid = InvalidMultiXactId;
+
 	maxoff = PageGetMaxOffsetNumber(page);
 	tup.t_tableOid = RelationGetRelid(prstate.rel);
 
@@ -426,7 +447,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 
 		if (pagefrz)
 			prune_prepare_freeze_tuple(page, offnum,
-									   pagefrz, presult);
+									   pagefrz, frozen, presult);
 
 		/* Ignore items already processed as part of an earlier chain */
 		if (prstate.marked[offnum])
@@ -541,6 +562,61 @@ heap_page_prune(Relation relation, Buffer buffer,
 
 	/* Record number of newly-set-LP_DEAD items for caller */
 	presult->nnewlpdead = prstate.ndead;
+
+	/*
+	 * Freeze the page when heap_prepare_freeze_tuple indicates that at least
+	 * one XID/MXID from before FreezeLimit/MultiXactCutoff is present.  Also
+	 * freeze when pruning generated an FPI, if doing so means that we set the
+	 * page all-frozen afterwards (might not happen until final heap pass).
+	 */
+	if (pagefrz)
+		do_freeze = pagefrz->freeze_required ||
+			(presult->all_visible_except_removable && presult->all_frozen &&
+			 presult->nfrozen > 0 &&
+			 fpi_before != pgWalUsage.wal_fpi);
+	else
+		do_freeze = false;
+
+	if (do_freeze)
+	{
+		frz_conflict_horizon = heap_frz_conflict_horizon(presult, pagefrz);
+
+		/* Execute all freeze plans for page as a single atomic action */
+		heap_freeze_execute_prepared(relation, buffer,
+									 frz_conflict_horizon,
+									 frozen, presult->nfrozen);
+	}
+	else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
+	{
+		/*
+		 * We will not freeze any tuples on the page.  Unless the page is
+		 * already all-frozen without any freeze plans, it must not be
+		 * reported as all-frozen, and there are no newly frozen tuples.
+		 */
+		presult->all_frozen = false;
+		presult->nfrozen = 0;	/* avoid miscounts in instrumentation */
+	}
+
+	/* The caller isn't interested in new_relfrozenxid or new_relminmxid */
+	if (!pagefrz)
+		return;
+
+	/*
+	 * If we froze tuples on the page, or if the page can be set all-frozen
+	 * in the visibility map even though nothing was frozen, relfrozenxid and
+	 * relminmxid can advance to pagefrz->FreezePageRelfrozenXid and
+	 * pagefrz->FreezePageRelminMxid.  Otherwise, use the "no freeze" values.
+	 */
+	if (presult->all_frozen || presult->nfrozen > 0)
+	{
+		presult->new_relfrozenxid = pagefrz->FreezePageRelfrozenXid;
+		presult->new_relminmxid = pagefrz->FreezePageRelminMxid;
+	}
+	else
+	{
+		presult->new_relfrozenxid = pagefrz->NoFreezePageRelfrozenXid;
+		presult->new_relminmxid = pagefrz->NoFreezePageRelminMxid;
+	}
 }
 
 
@@ -598,7 +674,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
  */
 static int
 heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
-				 PruneState *prstate, PruneResult *presult)
+				 PruneState *prstate, PruneFreezeResult *presult)
 {
 	int			ndeleted = 0;
 	Page		dp = (Page) BufferGetPage(buffer);
@@ -863,10 +939,10 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 	{
 		/*
 		 * We found a redirect item that doesn't point to a valid follow-on
-		 * item.  This can happen if the loop in heap_page_prune caused us to
-		 * visit the dead successor of a redirect item before visiting the
-		 * redirect item.  We can clean up by setting the redirect item to
-		 * DEAD state or LP_UNUSED if the caller indicated.
+		 * item.  This can happen if the loop in heap_page_prune_and_freeze()
+		 * caused us to visit the dead successor of a redirect item before
+		 * visiting the redirect item.  We can clean up by setting the
+		 * redirect item to DEAD state or LP_UNUSED if the caller indicated.
 		 */
 		heap_prune_record_dead_or_unused(prstate, rootoffnum, presult);
 	}
@@ -883,7 +959,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 static void
 prune_prepare_freeze_tuple(Page page, OffsetNumber offnum,
 						   HeapPageFreeze *pagefrz,
-						   PruneResult *presult)
+						   HeapTupleFreeze *frozen,
+						   PruneFreezeResult *presult)
 {
 	bool		totally_frozen;
 	HeapTupleHeader htup;
@@ -905,11 +982,11 @@ prune_prepare_freeze_tuple(Page page, OffsetNumber offnum,
 
 	/* Tuple with storage -- consider need to freeze */
 	if ((heap_prepare_freeze_tuple(htup, pagefrz,
-								   &presult->frozen[presult->nfrozen],
+								   &frozen[presult->nfrozen],
 								   &totally_frozen)))
 	{
 		/* Save prepared freeze plan for later */
-		presult->frozen[presult->nfrozen++].offset = offnum;
+		frozen[presult->nfrozen++].offset = offnum;
 	}
 
 	/*
@@ -953,7 +1030,7 @@ heap_prune_record_redirect(PruneState *prstate,
 /* Record line pointer to be marked dead */
 static void
 heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
-					   PruneResult *presult)
+					   PruneFreezeResult *presult)
 {
 	Assert(prstate->ndead < MaxHeapTuplesPerPage);
 	prstate->nowdead[prstate->ndead] = offnum;
@@ -976,7 +1053,7 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
  */
 static void
 heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
-								 PruneResult *presult)
+								 PruneFreezeResult *presult)
 {
 	/*
 	 * If the caller set mark_unused_now to true, we can remove dead tuples
@@ -1003,9 +1080,9 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
 
 
 /*
- * Perform the actual page changes needed by heap_page_prune.
- * It is expected that the caller has a full cleanup lock on the
- * buffer.
+ * Perform the actual page pruning modifications needed by
+ * heap_page_prune_and_freeze(). It is expected that the caller has a full
+ * cleanup lock on the buffer.
  */
 void
 heap_page_prune_execute(Buffer buffer,
@@ -1119,11 +1196,11 @@ heap_page_prune_execute(Buffer buffer,
 #ifdef USE_ASSERT_CHECKING
 
 		/*
-		 * When heap_page_prune() was called, mark_unused_now may have been
-		 * passed as true, which allows would-be LP_DEAD items to be made
-		 * LP_UNUSED instead. This is only possible if the relation has no
-		 * indexes. If there are any dead items, then mark_unused_now was not
-		 * true and every item being marked LP_UNUSED must refer to a
+		 * When heap_page_prune_and_freeze() was called, mark_unused_now may
+		 * have been passed as true, which allows would-be LP_DEAD items to be
+		 * made LP_UNUSED instead. This is only possible if the relation has
+		 * no indexes. If there are any dead items, then mark_unused_now was
+		 * not true and every item being marked LP_UNUSED must refer to a
 		 * heap-only tuple.
 		 */
 		if (ndead > 0)
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index abbb7ab3ada..6dd8d457c9c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -269,9 +269,6 @@ static void update_vacuum_error_info(LVRelState *vacrel,
 static void restore_vacuum_error_info(LVRelState *vacrel,
 									  const LVSavedErrInfo *saved_vacrel);
 
-static TransactionId heap_frz_conflict_horizon(PruneResult *presult,
-											   HeapPageFreeze *pagefrz);
-
 /*
  *	heap_vacuum_rel() -- perform VACUUM for one heap relation
  *
@@ -432,12 +429,13 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 * as an upper bound on the XIDs stored in the pages we'll actually scan
 	 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
 	 *
-	 * Next acquire vistest, a related cutoff that's used in heap_page_prune.
-	 * We expect vistest will always make heap_page_prune remove any deleted
-	 * tuple whose xmax is < OldestXmin.  lazy_scan_prune must never become
-	 * confused about whether a tuple should be frozen or removed.  (In the
-	 * future we might want to teach lazy_scan_prune to recompute vistest from
-	 * time to time, to increase the number of dead tuples it can prune away.)
+	 * Next acquire vistest, a related cutoff that's used in
+	 * heap_page_prune_and_freeze(). We expect vistest will always make
+	 * heap_page_prune_and_freeze() remove any deleted tuple whose xmax is <
+	 * OldestXmin.  lazy_scan_prune must never become confused about whether a
+	 * tuple should be frozen or removed.  (In the future we might want to
+	 * teach lazy_scan_prune to recompute vistest from time to time, to
+	 * increase the number of dead tuples it can prune away.)
 	 */
 	vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
 	vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
@@ -1379,8 +1377,8 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  * Determine the snapshotConflictHorizon for freezing. Must only be called
  * after pruning and determining if the page is freezable.
  */
-static TransactionId
-heap_frz_conflict_horizon(PruneResult *presult, HeapPageFreeze *pagefrz)
+TransactionId
+heap_frz_conflict_horizon(PruneFreezeResult *presult, HeapPageFreeze *pagefrz)
 {
 	TransactionId result;
 
@@ -1407,21 +1405,21 @@ heap_frz_conflict_horizon(PruneResult *presult, HeapPageFreeze *pagefrz)
  *
  * Caller must hold pin and buffer cleanup lock on the buffer.
  *
- * Prior to PostgreSQL 14 there were very rare cases where heap_page_prune()
- * was allowed to disagree with our HeapTupleSatisfiesVacuum() call about
- * whether or not a tuple should be considered DEAD.  This happened when an
- * inserting transaction concurrently aborted (after our heap_page_prune()
- * call, before our HeapTupleSatisfiesVacuum() call).  There was rather a lot
- * of complexity just so we could deal with tuples that were DEAD to VACUUM,
- * but nevertheless were left with storage after pruning.
+ * Prior to PostgreSQL 14 there were very rare cases where
+ * heap_page_prune_and_freeze() was allowed to disagree with our
+ * HeapTupleSatisfiesVacuum() call about whether or not a tuple should be
+ * considered DEAD.  This happened when an inserting transaction concurrently
+ * aborted (after our heap_page_prune_and_freeze() call, before our
+ * HeapTupleSatisfiesVacuum() call).  There was rather a lot of complexity just
+ * so we could deal with tuples that were DEAD to VACUUM, but nevertheless were
+ * left with storage after pruning.
  *
  * As of Postgres 17, we circumvent this problem altogether by reusing the
- * result of heap_page_prune()'s visibility check. Without the second call to
- * HeapTupleSatisfiesVacuum(), there is no new HTSV_Result and there can be no
- * disagreement. We'll just handle such tuples as if they had become fully dead
- * right after this operation completes instead of in the middle of it. Note that
- * any tuple that becomes dead after the call to heap_page_prune() can't need to
- * be frozen, because it was visible to another session when vacuum started.
+ * result of heap_page_prune_and_freeze()'s visibility check. Without the
+ * second call to HeapTupleSatisfiesVacuum(), there is no new HTSV_Result and
+ * there can be no disagreement. We'll just handle such tuples as if they had
+ * become fully dead right after this operation completes instead of in the
+ * middle of it.
  *
  * vmbuffer is the buffer containing the VM block with visibility information
  * for the heap block, blkno. all_visible_according_to_vm is the saved
@@ -1444,26 +1442,24 @@ lazy_scan_prune(LVRelState *vacrel,
 	OffsetNumber offnum,
 				maxoff;
 	ItemId		itemid;
-	PruneResult presult;
+	PruneFreezeResult presult;
 	int			lpdead_items,
 				live_tuples,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
 	bool		hastup = false;
-	bool		do_freeze;
-	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 
 	Assert(BufferGetBlockNumber(buf) == blkno);
 
 	/*
 	 * maxoff might be reduced following line pointer array truncation in
-	 * heap_page_prune.  That's safe for us to ignore, since the reclaimed
-	 * space will continue to look like LP_UNUSED items below.
+	 * heap_page_prune_and_freeze().  That's safe for us to ignore, since the
+	 * reclaimed space will continue to look like LP_UNUSED items below.
 	 */
 	maxoff = PageGetMaxOffsetNumber(page);
 
-	/* Initialize (or reset) page-level state */
+	/* Initialize pagefrz */
 	pagefrz.freeze_required = false;
 	pagefrz.FreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
 	pagefrz.FreezePageRelminMxid = vacrel->NewRelminMxid;
@@ -1475,7 +1471,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	recently_dead_tuples = 0;
 
 	/*
-	 * Prune all HOT-update chains in this page.
+	 * Prune all HOT-update chains and potentially freeze tuples on this page.
 	 *
 	 * We count the number of tuples removed from the page by the pruning step
 	 * in presult.ndeleted. It should not be confused with lpdead_items;
@@ -1486,8 +1482,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * items LP_UNUSED, so mark_unused_now should be true if no indexes and
 	 * false otherwise.
 	 */
-	heap_page_prune(rel, buf, vacrel->vistest, vacrel->nindexes == 0,
-					&pagefrz, &presult, &vacrel->offnum);
+	heap_page_prune_and_freeze(rel, buf, vacrel->vistest, vacrel->nindexes == 0,
+							   &pagefrz, &presult, &vacrel->offnum);
 
 	/*
 	 * Now scan the page to collect LP_DEAD items and check for tuples
@@ -1604,72 +1600,23 @@ lazy_scan_prune(LVRelState *vacrel,
 
 	vacrel->offnum = InvalidOffsetNumber;
 
-	/*
-	 * Freeze the page when heap_prepare_freeze_tuple indicates that at least
-	 * one XID/MXID from before FreezeLimit/MultiXactCutoff is present.  Also
-	 * freeze when pruning generated an FPI, if doing so means that we set the
-	 * page all-frozen afterwards (might not happen until final heap pass).
-	 */
-	do_freeze = pagefrz.freeze_required ||
-		(presult.all_visible_except_removable && presult.all_frozen &&
-		 presult.nfrozen > 0 &&
-		 fpi_before != pgWalUsage.wal_fpi);
+	Assert(TransactionIdIsValid(presult.new_relfrozenxid));
+	vacrel->NewRelfrozenXid = presult.new_relfrozenxid;
+	Assert(MultiXactIdIsValid(presult.new_relminmxid));
+	vacrel->NewRelminMxid = presult.new_relminmxid;
 
-	if (do_freeze)
+	if (presult.nfrozen > 0)
 	{
-		TransactionId snapshotConflictHorizon;
-
 		/*
-		 * We're freezing the page.  Our final NewRelfrozenXid doesn't need to
-		 * be affected by the XIDs that are just about to be frozen anyway.
+		 * We never increment the frozen_pages instrumentation counter when
+		 * nfrozen == 0, since it only counts pages with newly frozen tuples
+		 * (don't confuse that with pages newly set all-frozen in VM).
 		 */
-		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
-
 		vacrel->frozen_pages++;
 
-		snapshotConflictHorizon = heap_frz_conflict_horizon(&presult, &pagefrz);
-
 		/* Using same cutoff when setting VM is now unnecessary */
-		if (presult.all_visible_except_removable && presult.all_frozen)
+		if (presult.all_frozen)
 			presult.frz_conflict_horizon = InvalidTransactionId;
-
-		/* Execute all freeze plans for page as a single atomic action */
-		heap_freeze_execute_prepared(vacrel->rel, buf,
-									 snapshotConflictHorizon,
-									 presult.frozen, presult.nfrozen);
-	}
-	else if (presult.all_frozen && presult.nfrozen == 0)
-	{
-		/* Page should be all visible except to-be-removed tuples */
-		Assert(presult.all_visible_except_removable);
-
-		/*
-		 * We have no freeze plans to execute, so there's no added cost from
-		 * following the freeze path.  That's why it was chosen. This is
-		 * important in the case where the page only contains totally frozen
-		 * tuples at this point (perhaps only following pruning). Such pages
-		 * can be marked all-frozen in the VM by our caller, even though none
-		 * of its tuples were newly frozen here (note that the "no freeze"
-		 * path never sets pages all-frozen).
-		 *
-		 * We never increment the frozen_pages instrumentation counter here,
-		 * since it only counts pages with newly frozen tuples (don't confuse
-		 * that with pages newly set all-frozen in VM).
-		 */
-		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
-	}
-	else
-	{
-		/*
-		 * Page requires "no freeze" processing.  It might be set all-visible
-		 * in the visibility map, but it can never be set all-frozen.
-		 */
-		vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
-		presult.all_frozen = false;
-		presult.nfrozen = 0;	/* avoid miscounts in instrumentation */
 	}
 
 	/*
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 9eea1ed315a..7bffe09fb5d 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1715,9 +1715,9 @@ TransactionIdIsActive(TransactionId xid)
  * Note: the approximate horizons (see definition of GlobalVisState) are
  * updated by the computations done here. That's currently required for
  * correctness and a small optimization. Without doing so it's possible that
- * heap vacuum's call to heap_page_prune() uses a more conservative horizon
- * than later when deciding which tuples can be removed - which the code
- * doesn't expect (breaking HOT).
+ * heap vacuum's call to heap_page_prune_and_freeze() uses a more conservative
+ * horizon than later when deciding which tuples can be removed - which the
+ * code doesn't expect (breaking HOT).
  */
 static void
 ComputeXidHorizons(ComputeXidHorizonsResult *h)
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index bea35afc4bd..69d97bb8ece 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -195,7 +195,7 @@ typedef struct HeapPageFreeze
 /*
  * Per-page state returned from pruning
  */
-typedef struct PruneResult
+typedef struct PruneFreezeResult
 {
 	int			ndeleted;		/* Number of tuples deleted from the page */
 	int			nnewlpdead;		/* Number of newly LP_DEAD items */
@@ -204,9 +204,10 @@ typedef struct PruneResult
 
 	/*
 	 * Tuple visibility is only computed once for each tuple, for correctness
-	 * and efficiency reasons; see comment in heap_page_prune() for details.
-	 * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
-	 * indicate no visibility has been computed, e.g. for LP_DEAD items.
+	 * and efficiency reasons; see comment in heap_page_prune_and_freeze() for
+	 * details. This is of type int8[], instead of HTSV_Result[], so we can
+	 * use -1 to indicate no visibility has been computed, e.g. for LP_DEAD
+	 * items.
 	 *
 	 * This needs to be MaxHeapTuplesPerPage + 1 long as FirstOffsetNumber is
 	 * 1. Otherwise every access would need to subtract 1.
@@ -221,17 +222,18 @@ typedef struct PruneResult
 	/* Number of newly frozen tuples */
 	int			nfrozen;
 
-	/*
-	 * One entry for every tuple that we may freeze.
-	 */
-	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
-} PruneResult;
+	/* New value of relfrozenxid found by heap_page_prune_and_freeze() */
+	TransactionId new_relfrozenxid;
+
+	/* New value of relminmxid found by heap_page_prune_and_freeze() */
+	MultiXactId new_relminmxid;
+} PruneFreezeResult;
 
 /*
  * Pruning calculates tuple visibility once and saves the results in an array
- * of int8. See PruneResult.htsv for details. This helper function is meant to
- * guard against examining visibility status array members which have not yet
- * been computed.
+ * of int8. See PruneFreezeResult.htsv for details. This helper function is
+ * meant to guard against examining visibility status array members which have
+ * not yet been computed.
  */
 static inline HTSV_Result
 htsv_get_valid_status(int status)
@@ -307,6 +309,9 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 								 Buffer *buffer, struct TM_FailureData *tmfd);
 
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
+
+extern TransactionId heap_frz_conflict_horizon(PruneFreezeResult *presult,
+											   HeapPageFreeze *pagefrz);
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 									  HeapPageFreeze *pagefrz,
 									  HeapTupleFreeze *frz, bool *totally_frozen);
@@ -333,12 +338,12 @@ extern TransactionId heap_index_delete_tuples(Relation rel,
 /* in heap/pruneheap.c */
 struct GlobalVisState;
 extern void heap_page_prune_opt(Relation relation, Buffer buffer);
-extern void heap_page_prune(Relation relation, Buffer buffer,
-							struct GlobalVisState *vistest,
-							bool mark_unused_now,
-							HeapPageFreeze *pagefrz,
-							PruneResult *presult,
-							OffsetNumber *off_loc);
+extern void heap_page_prune_and_freeze(Relation relation, Buffer buffer,
+									   struct GlobalVisState *vistest,
+									   bool mark_unused_now,
+									   HeapPageFreeze *pagefrz,
+									   PruneFreezeResult *presult,
+									   OffsetNumber *off_loc);
 extern void heap_page_prune_execute(Buffer buffer,
 									OffsetNumber *redirected, int nredirected,
 									OffsetNumber *nowdead, int ndead,
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index aa7a25b8f8c..1c1a4d305d6 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2175,7 +2175,7 @@ ProjectionPath
 PromptInterruptContext
 ProtocolVersion
 PrsStorage
-PruneResult
+PruneFreezeResult
 PruneState
 PruneStepResult
 PsqlScanCallbacks
-- 
2.40.1

