From abf51df70c27ad0fd255ffe5877a25b556709baa Mon Sep 17 00:00:00 2001
From: Andres Freund <andres@anarazel.de>
Date: Wed, 11 Mar 2026 15:12:53 -0400
Subject: [PATCH v13 2/3] Use UnlockReleaseBuffer() in more places

An upcoming commit will make UnlockReleaseBuffer() considerably faster and
more scalable than a separate LockBuffer(BUFFER_LOCK_UNLOCK) followed by
ReleaseBuffer(). But even as-is, there is a small performance benefit.

Most of the callsites changed in this patch are not performance sensitive;
however, some, like the nbtree ones, are in critical paths.

This patch changes all the easily convertible places over to
UnlockReleaseBuffer() mainly because I needed to check all of them anyway, and
reducing cases where the operations are done separately makes the checking
easier.

Discussion: https://postgr.es/m/
---
 src/backend/access/heap/heapam.c          |  6 ++--
 src/backend/access/heap/hio.c             |  7 +++--
 src/backend/access/nbtree/nbtpage.c       | 36 +++++++++++++++++++----
 src/backend/storage/buffer/bufmgr.c       |  3 +-
 src/backend/storage/freespace/freespace.c |  3 +-
 contrib/amcheck/verify_gin.c              |  6 ++--
 contrib/pageinspect/rawpage.c             |  3 +-
 contrib/pgstattuple/pgstatindex.c         |  3 +-
 src/test/modules/test_aio/test_aio.c      |  7 ++---
 9 files changed, 44 insertions(+), 30 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 8f1c11a9350..7b988ff36df 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1687,8 +1687,7 @@ heap_fetch(Relation relation,
 	offnum = ItemPointerGetOffsetNumber(tid);
 	if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
 	{
-		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		ReleaseBuffer(buffer);
+		UnlockReleaseBuffer(buffer);
 		*userbuf = InvalidBuffer;
 		tuple->t_data = NULL;
 		return false;
@@ -1704,8 +1703,7 @@ heap_fetch(Relation relation,
 	 */
 	if (!ItemIdIsNormal(lp))
 	{
-		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		ReleaseBuffer(buffer);
+		UnlockReleaseBuffer(buffer);
 		*userbuf = InvalidBuffer;
 		tuple->t_data = NULL;
 		return false;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index d26ceacd38c..1097f44a74e 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -711,14 +711,15 @@ loop:
 		 * unlock the two buffers in, so this can be slightly simpler than the
 		 * code above.
 		 */
-		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 		if (otherBuffer == InvalidBuffer)
-			ReleaseBuffer(buffer);
+			UnlockReleaseBuffer(buffer);
 		else if (otherBlock != targetBlock)
 		{
+			UnlockReleaseBuffer(buffer);
 			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
-			ReleaseBuffer(buffer);
 		}
+		else
+			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 		/* Is there an ongoing bulk extension? */
 		if (bistate && bistate->next_free != InvalidBlockNumber)
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 4125c185e8b..acefe68a382 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1007,24 +1007,48 @@ _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
 
 	Assert(BlockNumberIsValid(blkno));
 	if (BufferIsValid(obuf))
-		_bt_unlockbuf(rel, obuf);
-	buf = ReleaseAndReadBuffer(obuf, rel, blkno);
+	{
+		if (BufferGetBlockNumber(obuf) == blkno)
+		{
+			/* trade in old lock mode for new lock */
+			_bt_unlockbuf(rel, obuf);
+			buf = obuf;
+		}
+		else
+		{
+			/* release lock and pin at once, that's a bit more efficient */
+			_bt_relbuf(rel, obuf);
+			buf = ReadBuffer(rel, blkno);
+		}
+	}
+	else
+		buf = ReadBuffer(rel, blkno);
+
 	_bt_lockbuf(rel, buf, access);
-
 	_bt_checkpage(rel, buf);
+
 	return buf;
 }
 
 /*
  *	_bt_relbuf() -- release a locked buffer.
  *
- * Lock and pin (refcount) are both dropped.
+ * Lock and pin (refcount) are both dropped. This is a bit more efficient than
+ * doing the two operations separately.
  */
 void
 _bt_relbuf(Relation rel, Buffer buf)
 {
-	_bt_unlockbuf(rel, buf);
-	ReleaseBuffer(buf);
+	/*
+	 * Buffer is pinned and locked, which means that it is expected to be
+	 * defined and addressable.  Check that proactively.
+	 */
+	VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
+
+	UnlockReleaseBuffer(buf);
+
+	if (!RelationUsesLocalBuffers(rel))
+		VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
 }
 
 /*
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index e5c06882395..53b327200d7 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2531,8 +2531,7 @@ again:
 			XLogNeedsFlush(BufferGetLSN(buf_hdr)) &&
 			StrategyRejectBuffer(strategy, buf_hdr, from_ring))
 		{
-			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-			UnpinBuffer(buf_hdr);
+			UnlockReleaseBuffer(buf);
 			goto again;
 		}
 
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index b9a8f368a63..40d67a96178 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -915,9 +915,8 @@ fsm_vacuum_page(Relation rel, FSMAddress addr,
 		((FSMPage) PageGetContents(page))->fp_next_slot = 0;
 		BufferFinishSetHintBits(buf, false, false);
 	}
-	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 
-	ReleaseBuffer(buf);
+	UnlockReleaseBuffer(buf);
 
 	return max_avail;
 }
diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c
index f7a15a21467..abfad07d5e4 100644
--- a/contrib/amcheck/verify_gin.c
+++ b/contrib/amcheck/verify_gin.c
@@ -368,8 +368,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
 				stack->next = ptr;
 			}
 		}
-		LockBuffer(buffer, GIN_UNLOCK);
-		ReleaseBuffer(buffer);
+		UnlockReleaseBuffer(buffer);
 
 		/* Step to next item in the queue */
 		stack_next = stack->next;
@@ -642,8 +641,7 @@ gin_check_parent_keys_consistency(Relation rel,
 			prev_attnum = current_attnum;
 		}
 
-		LockBuffer(buffer, GIN_UNLOCK);
-		ReleaseBuffer(buffer);
+		UnlockReleaseBuffer(buffer);
 
 		/* Step to next item in the queue */
 		stack_next = stack->next;
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index 86fe245cac5..f3996dc39fc 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -193,8 +193,7 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
 
 	memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
 
-	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-	ReleaseBuffer(buf);
+	UnlockReleaseBuffer(buf);
 
 	relation_close(rel, AccessShareLock);
 
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index ef723af1f19..a4716bd1b36 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -323,8 +323,7 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo)
 			indexStat.internal_pages++;
 
 		/* Unlock and release buffer */
-		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		ReleaseBuffer(buffer);
+		UnlockReleaseBuffer(buffer);
 	}
 
 	relation_close(rel, AccessShareLock);
diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c
index 2ae4a559fab..138e1259dfd 100644
--- a/src/test/modules/test_aio/test_aio.c
+++ b/src/test/modules/test_aio/test_aio.c
@@ -221,9 +221,7 @@ modify_rel_block(PG_FUNCTION_ARGS)
 	 */
 	memcpy(page, BufferGetPage(buf), BLCKSZ);
 
-	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
-	ReleaseBuffer(buf);
+	UnlockReleaseBuffer(buf);
 
 	/*
 	 * Don't want to have a buffer in-memory that's marked valid where the
@@ -496,8 +494,7 @@ invalidate_rel_block(PG_FUNCTION_ARGS)
 				else
 					FlushOneBuffer(buf);
 			}
-			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-			ReleaseBuffer(buf);
+			UnlockReleaseBuffer(buf);
 
 			if (BufferIsLocal(buf))
 				InvalidateLocalBuffer(GetLocalBufferDescriptor(-buf - 1), true);
-- 
2.53.0.1.gb2826b52eb

