From b871916ebc30ca69fbf61aa4f95394c407bcc1cd Mon Sep 17 00:00:00 2001
From: Andres Freund <andres@anarazel.de>
Date: Mon, 2 Feb 2026 09:54:01 -0500
Subject: [PATCH v12 1/6] heapam: Don't mimic MarkBufferDirtyHint() in inplace
 updates

Previously heap_inplace_update_and_unlock() used an operation order similar to
MarkBufferDirtyHint()'s, to reduce the number of different approaches used for
updating buffers.  However, in an upcoming patch, MarkBufferDirtyHint() will
switch to using the update protocol used by most other places (enabled by hint
bits only being set while holding a share-exclusive lock).

Luckily it's pretty easy to adjust heap_inplace_update_and_unlock(): as a
comment already foresaw, we can use the normal order of operations, with the
slight change of updating the buffer contents only after WAL logging.
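
To make the new ordering easy to see, here is the resulting sequence in
heap_inplace_update_and_unlock(), abridged from the hunks below (WAL record
assembly and comments elided); this is a rough sketch, not the full function:

    START_CRIT_SECTION();

    /* dirty first; our exclusive lock keeps the buffer from being flushed */
    MarkBufferDirty(buffer);

    /* XLOG stuff */
    if (RelationNeedsWAL(relation))
    {
        /* ... assemble and insert the inplace-update WAL record ... */
    }

    /* mutate the buffer contents only after WAL logging */
    memcpy(dst, src, newlen);

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    AtInplace_Inval();

    END_CRIT_SECTION();
    UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);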

Author:
Reviewed-by:
Discussion: https://postgr.es/m/
Backpatch:
---
 src/backend/access/heap/heapam.c | 34 ++++++++++++--------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 3004964ab7f..e387923b9bb 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6611,11 +6611,11 @@ heap_inplace_update_and_unlock(Relation relation,
 	/*----------
 	 * NO EREPORT(ERROR) from here till changes are complete
 	 *
-	 * Our buffer lock won't stop a reader having already pinned and checked
-	 * visibility for this tuple.  Hence, we write WAL first, then mutate the
-	 * buffer.  Like in MarkBufferDirtyHint() or RecordTransactionCommit(),
-	 * checkpoint delay makes that acceptable.  With the usual order of
-	 * changes, a crash after memcpy() and before XLogInsert() could allow
+	 * Our exclusive buffer lock won't stop a reader having already pinned and
+	 * checked visibility for this tuple. With the usual order of changes
+	 * (i.e. updating the buffer contents before WAL logging), a reader could
+	 * observe our not-yet-persistent update to relfrozenxid and update
+	 * datfrozenxid based on that. A crash at that moment could allow
 	 * datfrozenxid to overtake relfrozenxid:
 	 *
 	 * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
@@ -6627,21 +6627,16 @@ heap_inplace_update_and_unlock(Relation relation,
 	 * [crash]
 	 * [recovery restores datfrozenxid w/o relfrozenxid]
 	 *
-	 * Mimic MarkBufferDirtyHint() subroutine XLogSaveBufferForHint().
-	 * Specifically, use DELAY_CHKPT_START, and copy the buffer to the stack.
-	 * The stack copy facilitates a FPI of the post-mutation block before we
-	 * accept other sessions seeing it.  DELAY_CHKPT_START allows us to
-	 * XLogInsert() before MarkBufferDirty().  Since XLogSaveBufferForHint()
-	 * can operate under BUFFER_LOCK_SHARED, it can't avoid DELAY_CHKPT_START.
-	 * This function, however, likely could avoid it with the following order
-	 * of operations: MarkBufferDirty(), XLogInsert(), memcpy().  Opt to use
-	 * DELAY_CHKPT_START here, too, as a way to have fewer distinct code
-	 * patterns to analyze.  Inplace update isn't so frequent that it should
-	 * pursue the small optimization of skipping DELAY_CHKPT_START.
+	 * As we hold an exclusive lock, preventing the buffer from being written
+	 * out once dirty, we can avoid that hazard with the following order of
+	 * operations: MarkBufferDirty(), XLogInsert(), memcpy().
+	 *
+	 * That way, any action a reader takes based on the in-place-updated
+	 * value will be WAL-logged after our change.
 	 */
-	Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
 	START_CRIT_SECTION();
-	MyProc->delayChkptFlags |= DELAY_CHKPT_START;
+
+	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
 	if (RelationNeedsWAL(relation))
@@ -6690,8 +6685,6 @@ heap_inplace_update_and_unlock(Relation relation,
 
 	memcpy(dst, src, newlen);
 
-	MarkBufferDirty(buffer);
-
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 	/*
@@ -6700,7 +6693,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	 */
 	AtInplace_Inval();
 
-	MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
 	END_CRIT_SECTION();
 	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-- 
2.48.1.76.g4e746b1a31.dirty

