From d7bf70e73ec52bb94d088f1be43712aff97a12c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=81lvaro=20Herrera?= <alvherre@kurilemu.de>
Date: Sun, 29 Mar 2026 23:11:42 +0200
Subject: [PATCH v4 2/2] Change heapam.c to obey tableam.h option bits directly

Redefining the bits at the heapam.h interface serves no purpose.  This
was done to maintain the independence of heapam when used outside of
table AM (which is very widespread, given that all system catalogs use
it that way); however, heapam no longer lives in a world where tableam.h
doesn't exist, so this separation of concerns is of no use.

Also, whenever somebody next defines a new TABLE_INSERT_* bit, its
value would collide with HEAP_INSERT_SPECULATIVE.  Move the latter to
the other end of the bits word.

Discussion: https://postgr.es/m/202603171606.kf6pmhscqbqz@alvherre.pgsql
---
 src/backend/access/heap/heapam.c         | 20 ++++++++++----------
 src/backend/access/heap/hio.c            | 10 +++++-----
 src/backend/access/heap/rewriteheap.c    |  4 ++--
 src/backend/replication/logical/decode.c |  2 +-
 src/include/access/heapam.h              | 11 ++++++-----
 5 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 0645b2d5f58..8c5634ece24 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2138,9 +2138,9 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate)
  * See table_tuple_insert for comments about most of the input flags, except
  * that this routine directly takes a tuple rather than a slot.
  *
- * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
- * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
- * implement table_tuple_insert_speculative().
+ * In addition to the TABLE_INSERT_ options, there is
+ * HEAP_INSERT_SPECULATIVE, which is used to implement
+ * table_tuple_insert_speculative().
  *
  * On return the header fields of *tup are updated to match the stored tuple;
  * in particular tup->t_self receives the actual TID where the tuple was
@@ -2228,7 +2228,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	 * Don't set it if we are in bootstrap mode or we are inserting a frozen
 	 * tuple, as there is no further pruning/freezing needed in those cases.
 	 */
-	if (TransactionIdIsNormal(xid) && !(options & HEAP_INSERT_FROZEN))
+	if (TransactionIdIsNormal(xid) && !(options & TABLE_INSERT_FROZEN))
 		PageSetPrunable(page, xid);
 
 	MarkBufferDirty(buffer);
@@ -2275,7 +2275,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 		 * image. (XXX We could alternatively store a pointer into the FPW).
 		 */
 		if (RelationIsLogicallyLogged(relation) &&
-			!(options & HEAP_INSERT_NO_LOGICAL))
+			!(options & TABLE_INSERT_NO_LOGICAL))
 		{
 			xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
 			bufflags |= REGBUF_KEEP_DATA;
@@ -2364,7 +2364,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 	tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
 	tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
 	HeapTupleHeaderSetXmin(tup->t_data, xid);
-	if (options & HEAP_INSERT_FROZEN)
+	if (options & TABLE_INSERT_FROZEN)
 		HeapTupleHeaderSetXminFrozen(tup->t_data);
 
 	HeapTupleHeaderSetCmin(tup->t_data, cid);
@@ -2445,7 +2445,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	int			npages_used = 0;
 
 	/* currently not needed (thus unsupported) for heap_multi_insert() */
-	Assert(!(options & HEAP_INSERT_NO_LOGICAL));
+	Assert(!(options & TABLE_INSERT_NO_LOGICAL));
 
 	AssertHasSnapshotForToast(relation);
 
@@ -2535,7 +2535,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 
 		starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
 
-		if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
+		if (starting_with_empty_page && (options & TABLE_INSERT_FROZEN))
 		{
 			all_frozen_set = true;
 			/* Lock the vmbuffer before entering the critical section */
@@ -2583,7 +2583,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 		 * page, mark it as all-frozen and update the visibility map. We're
 		 * already holding a pin on the vmbuffer.
 		 */
-		if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
+		if (PageIsAllVisible(page) && !(options & TABLE_INSERT_FROZEN))
 		{
 			all_visible_cleared = true;
 			PageClearAllVisible(page);
@@ -2655,7 +2655,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 
 			/*
 			 * We don't have to worry about including a conflict xid in the
-			 * WAL record, as HEAP_INSERT_FROZEN intentionally violates
+			 * WAL record, as TABLE_INSERT_FROZEN intentionally violates
 			 * visibility rules.
 			 */
 			if (all_frozen_set)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 1097f44a74e..146c8cc4a68 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -469,12 +469,12 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate,
  *	which is indicated by these arguments not being InvalidBuffer on entry.
  *
  *	We normally use FSM to help us find free space.  However,
- *	if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
+ *	if TABLE_INSERT_SKIP_FSM is specified, we just append a new empty page to
  *	the end of the relation if the tuple won't fit on the current target page.
  *	This can save some cycles when we know the relation is new and doesn't
  *	contain useful amounts of free space.
  *
- *	HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
+ *	TABLE_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
  *	relation, if the caller holds exclusive lock and is careful to invalidate
  *	relation's smgr_targblock before the first insertion --- that ensures that
  *	all insertions will occur into newly added pages and not be intermixed
@@ -503,7 +503,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
 						  Buffer *vmbuffer, Buffer *vmbuffer_other,
 						  int num_pages)
 {
-	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
+	bool		use_fsm = !(options & TABLE_INSERT_SKIP_FSM);
 	Buffer		buffer = InvalidBuffer;
 	Page		page;
 	Size		nearlyEmptyFreeSpace,
@@ -621,7 +621,7 @@ loop:
 			/*
 			 * If the page is empty, pin vmbuffer to set all_frozen bit later.
 			 */
-			if ((options & HEAP_INSERT_FROZEN) &&
+			if ((options & TABLE_INSERT_FROZEN) &&
 				(PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0))
 				visibilitymap_pin(relation, targetBlock, vmbuffer);
 
@@ -774,7 +774,7 @@ loop:
 	 * do IO while the buffer is locked, so we unlock the page first if IO is
 	 * needed (necessitating checks below).
 	 */
-	if (options & HEAP_INSERT_FROZEN)
+	if (options & TABLE_INSERT_FROZEN)
 	{
 		Assert(PageGetMaxOffsetNumber(page) == 0);
 
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 6b19ac3030d..b21dd6bb26b 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -618,14 +618,14 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 	}
 	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
 	{
-		int			options = HEAP_INSERT_SKIP_FSM;
+		int			options = TABLE_INSERT_SKIP_FSM;
 
 		/*
 		 * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
 		 * for the TOAST table are not logically decoded.  The main heap is
 		 * WAL-logged as XLOG FPI records, which are not logically decoded.
 		 */
-		options |= HEAP_INSERT_NO_LOGICAL;
+		options |= TABLE_INSERT_NO_LOGICAL;
 
 		heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL,
 											  options);
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 3c027bcb2f7..d80a26ce88e 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -898,7 +898,7 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
 	/*
 	 * Ignore insert records without new tuples (this does happen when
-	 * raw_heap_insert marks the TOAST record as HEAP_INSERT_NO_LOGICAL).
+	 * raw_heap_insert marks the TOAST record as TABLE_INSERT_NO_LOGICAL).
 	 */
 	if (!(xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE))
 		return;
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 77560b22877..10b26e56ff0 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -33,11 +33,12 @@
 #include "utils/snapshot.h"
 
 
-/* "options" flag bits for heap_insert */
-#define HEAP_INSERT_SKIP_FSM	TABLE_INSERT_SKIP_FSM
-#define HEAP_INSERT_FROZEN		TABLE_INSERT_FROZEN
-#define HEAP_INSERT_NO_LOGICAL	TABLE_INSERT_NO_LOGICAL
-#define HEAP_INSERT_SPECULATIVE 0x0010
+/*
+ * "options" flag bits for heap_insert.  That routine also obeys bits defined
+ * for table_tuple_insert; see tableam.h.  We define these starting from the
+ * opposite end of the options word, avoiding the sign bit: left-shifting
+ * into bit 31 of a signed int is undefined behavior.
+ */
+#define HEAP_INSERT_SPECULATIVE (1 << 30)
 
 /* "options" flag bits for heap_page_prune_and_freeze */
 #define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW		(1 << 0)
-- 
2.47.3

