From 07af817d3a9ebe4cf01a4652a5300821181d504b Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Wed, 7 Jan 2026 13:32:18 -0500
Subject: [PATCH v12 1/8] Streamline buffer rejection for bulkreads of unlogged
 tables

Bulk-read buffer access strategies reject reusing a buffer from the
buffer access strategy ring if reusing it would require flushing WAL.
Unlogged relations never require WAL flushes, so this check can be
skipped. This avoids taking the buffer header lock unnecessarily.

Refactor this into StrategyRejectBuffer() itself, which also avoids LSN
checking for non-bulkread buffer access strategies.
---
 src/backend/storage/buffer/bufmgr.c   | 69 ++++++++++++++++++++-------
 src/backend/storage/buffer/freelist.c | 13 ++++-
 src/include/storage/buf_internals.h   |  2 +
 3 files changed, 66 insertions(+), 18 deletions(-)

diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index a036c2aa275..29c11d2d357 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2482,26 +2482,15 @@ again:
 		/*
 		 * If using a nondefault strategy, and writing the buffer would
 		 * require a WAL flush, let the strategy decide whether to go ahead
-		 * and write/reuse the buffer or to choose another victim.  We need a
-		 * lock to inspect the page LSN, so this can't be done inside
+		 * and write/reuse the buffer or to choose another victim. We need the
+		 * content lock to inspect the page LSN, so this can't be done inside
 		 * StrategyGetBuffer.
 		 */
-		if (strategy != NULL)
+		if (StrategyRejectBuffer(strategy, buf_hdr, from_ring))
 		{
-			XLogRecPtr	lsn;
-
-			/* Read the LSN while holding buffer header lock */
-			buf_state = LockBufHdr(buf_hdr);
-			lsn = BufferGetLSN(buf_hdr);
-			UnlockBufHdr(buf_hdr);
-
-			if (XLogNeedsFlush(lsn)
-				&& StrategyRejectBuffer(strategy, buf_hdr, from_ring))
-			{
-				LWLockRelease(content_lock);
-				UnpinBuffer(buf_hdr);
-				goto again;
-			}
+			LWLockRelease(content_lock);
+			UnpinBuffer(buf_hdr);
+			goto again;
 		}
 
 		/* OK, do the I/O */
@@ -3416,6 +3405,52 @@ TrackNewBufferPin(Buffer buf)
 							  BLCKSZ);
 }
 
+/*
+ * Returns true if the buffer needs WAL flushed before it can be written out.
+ * *lsn is set to the current page LSN.
+ *
+ * If the result is required to be correct, the caller must hold a buffer
+ * content lock. If the caller holds only a shared content lock, we will need
+ * to acquire the buffer header spinlock, so it must not already be held.
+ *
+ * If the buffer is unlogged, *lsn shouldn't be used by the caller and is set
+ * to InvalidXLogRecPtr.
+ */
+bool
+BufferNeedsWALFlush(BufferDesc *bufdesc, bool exclusive, XLogRecPtr *lsn)
+{
+	uint32		buf_state = pg_atomic_read_u32(&bufdesc->state);
+	char	   *page;
+	Buffer		buffer;
+
+	/*
+	 * Unlogged buffers can't need WAL flush. See FlushBuffer() for more
+	 * details on unlogged relations with LSNs.
+	 */
+	if (!(buf_state & BM_PERMANENT))
+	{
+		*lsn = InvalidXLogRecPtr;
+		return false;
+	}
+
+	buffer = BufferDescriptorGetBuffer(bufdesc);
+	page = BufferGetPage(buffer);
+
+	Assert(BufferIsValid(buffer));
+
+	if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer) || exclusive)
+		*lsn = PageGetLSN(page);
+	else
+	{
+		LockBufHdr(bufdesc);
+		*lsn = PageGetLSN(page);
+		UnlockBufHdr(bufdesc);
+	}
+
+	return XLogNeedsFlush(*lsn);
+}
+
+
 #define ST_SORT sort_checkpoint_bufferids
 #define ST_ELEMENT_TYPE CkptSortItem
 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 9a93fb335fc..6a4452e2da0 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -780,12 +780,20 @@ IOContextForStrategy(BufferAccessStrategy strategy)
  * be written out and doing so would require flushing WAL too.  This gives us
  * a chance to choose a different victim.
  *
+ * The buffer must be pinned and content locked and the buffer header spinlock
+ * must not be held.
+ *
  * Returns true if buffer manager should ask for a new victim, and false
  * if this buffer should be written and re-used.
  */
 bool
 StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring)
 {
+	XLogRecPtr	lsn;
+
+	if (!strategy)
+		return false;
+
 	/* We only do this in bulkread mode */
 	if (strategy->btype != BAS_BULKREAD)
 		return false;
@@ -795,8 +803,11 @@ StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_r
 		strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
 		return false;
 
+	if (!BufferNeedsWALFlush(buf, false, &lsn))
+		return false;
+
 	/*
-	 * Remove the dirty buffer from the ring; necessary to prevent infinite
+	 * Remove the dirty buffer from the ring; necessary to prevent an infinite
 	 * loop if all ring members are dirty.
 	 */
 	strategy->buffers[strategy->current] = InvalidBuffer;
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index fa43cf4458d..3c774d7a1d2 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -15,6 +15,7 @@
 #ifndef BUFMGR_INTERNALS_H
 #define BUFMGR_INTERNALS_H
 
+#include "access/xlogdefs.h"
 #include "pgstat.h"
 #include "port/atomics.h"
 #include "storage/aio_types.h"
@@ -493,6 +494,7 @@ extern void ScheduleBufferTagForWriteback(WritebackContext *wb_context,
 										  IOContext io_context, BufferTag *tag);
 
 extern void TrackNewBufferPin(Buffer buf);
+extern bool BufferNeedsWALFlush(BufferDesc *bufdesc, bool exclusive, XLogRecPtr *lsn);
 
 /* solely to make it easier to write tests */
 extern bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
-- 
2.43.0

