From 1cf86f889057dc8637290e5c32c0df85d2437f48 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Mon, 27 Apr 2026 17:59:11 -0400
Subject: [PATCH v1_PGMASTER 2/7] Fix WAL logging of VM clears

Previously, we failed to register the visibility map buffer when
emitting WAL after clearing the VM bits for heap pages; the recovery
code instead read the VM page directly, outside the block-reference
mechanism. This meant that we could not emit FPIs of VM pages when
clearing VM bits. While this did not cause checksum errors, because the
visibility map is read with RBM_ZERO_ON_ERROR, the WAL summarizer only
includes pages that were registered in WAL records, so a restore from an
incremental backup would not include the modified VM pages -- leading
to data corruption.

Fix this by registering the VM buffer in the WAL record when clearing VM
bits. This also means the VM buffer must be kept locked throughout the
critical section in which we modify the VM and heap pages and emit WAL.

Bumps XLOG_PAGE_MAGIC, since existing WAL record types gain new block
references.

Author: Andres Freund <andres@anarazel.de>
Author: Melanie Plageman <melanieplageman@gmail.com>
Backpatch through: 17 when incremental backup was introduced
---
 contrib/pg_surgery/heap_surgery.c       |   7 +-
 src/backend/access/heap/heapam.c        | 224 ++++++++++++++++++++----
 src/backend/access/heap/heapam_xlog.c   | 145 +++++++++------
 src/backend/access/heap/pruneheap.c     |   4 +-
 src/backend/access/heap/visibilitymap.c |  36 ++--
 src/bin/pg_walsummary/t/002_blocks.pl   |   7 +-
 src/include/access/heapam_xlog.h        |  15 ++
 src/include/access/visibilitymap.h      |   4 +-
 src/include/access/xlog_internal.h      |   2 +-
 9 files changed, 330 insertions(+), 114 deletions(-)

diff --git a/contrib/pg_surgery/heap_surgery.c b/contrib/pg_surgery/heap_surgery.c
index b82c29e7cbf..80d6f073c4a 100644
--- a/contrib/pg_surgery/heap_surgery.c
+++ b/contrib/pg_surgery/heap_surgery.c
@@ -266,9 +266,10 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt)
 				 */
 				if (PageIsAllVisible(page))
 				{
+					LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
 					PageClearAllVisible(page);
 					visibilitymap_clear(rel->rd_locator, blkno, vmbuf,
-										VISIBILITYMAP_VALID_BITS);
+										VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
 					did_modify_vm = true;
 				}
 			}
@@ -331,7 +332,9 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt)
 
 		UnlockReleaseBuffer(buf);
 
-		if (vmbuf != InvalidBuffer)
+		if (did_modify_vm)
+			UnlockReleaseBuffer(vmbuf);
+		else if (BufferIsValid(vmbuf))
 			ReleaseBuffer(vmbuf);
 
 		/* Update the current_start_ptr before moving to the next page. */
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 777f8bec2e0..a97d3ae1cc4 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -60,7 +60,8 @@
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
 									 TransactionId xid, CommandId cid, uint32 options);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
-								  Buffer newbuf, HeapTuple oldtup,
+								  Buffer vmbuffer_old, Buffer newbuf,
+								  Buffer vmbuffer_new, HeapTuple oldtup,
 								  HeapTuple newtup, HeapTuple old_key_tuple,
 								  bool all_visible_cleared, bool new_all_visible_cleared,
 								  bool walLogical);
@@ -2062,10 +2063,12 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	if (PageIsAllVisible(page))
 	{
 		all_visible_cleared = true;
+
+		LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
 		PageClearAllVisible(page);
 		visibilitymap_clear(relation->rd_locator,
 							ItemPointerGetBlockNumber(&(heaptup->t_self)),
-							vmbuffer, VISIBILITYMAP_VALID_BITS);
+							vmbuffer, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
 	}
 
 	/*
@@ -2159,13 +2162,23 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 		/* filtering by origin on a row level is much more efficient */
 		XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
 
+		if (all_visible_cleared)
+			XLogRegisterBuffer(1, vmbuffer, 0);
+
 		recptr = XLogInsert(RM_HEAP_ID, info);
 
 		PageSetLSN(page, recptr);
+
+		if (all_visible_cleared)
+			PageSetLSN(BufferGetPage(vmbuffer), recptr);
 	}
 
 	END_CRIT_SECTION();
 
+	/* release VM lock first, since it covers many heap blocks */
+	if (all_visible_cleared)
+		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
 	UnlockReleaseBuffer(buffer);
 	if (vmbuffer != InvalidBuffer)
 		ReleaseBuffer(vmbuffer);
@@ -2439,10 +2452,12 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 		if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
 		{
 			all_visible_cleared = true;
+
+			LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
 			PageClearAllVisible(page);
 			visibilitymap_clear(relation->rd_locator,
 								BufferGetBlockNumber(buffer),
-								vmbuffer, VISIBILITYMAP_VALID_BITS);
+								vmbuffer, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
 		}
 		else if (all_frozen_set)
 		{
@@ -2574,7 +2589,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 			XLogBeginInsert();
 			XLogRegisterData(xlrec, tupledata - scratch.data);
 			XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
-			if (all_frozen_set)
+			if (all_frozen_set || all_visible_cleared)
 				XLogRegisterBuffer(1, vmbuffer, 0);
 
 			XLogRegisterBufData(0, tupledata, totaldatalen);
@@ -2585,7 +2600,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 			recptr = XLogInsert(RM_HEAP2_ID, info);
 
 			PageSetLSN(page, recptr);
-			if (all_frozen_set)
+			if (all_frozen_set || all_visible_cleared)
 			{
 				Assert(BufferIsDirty(vmbuffer));
 				PageSetLSN(BufferGetPage(vmbuffer), recptr);
@@ -2594,7 +2609,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 
 		END_CRIT_SECTION();
 
-		if (all_frozen_set)
+		if (all_frozen_set || all_visible_cleared)
 			LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
 
 		UnlockReleaseBuffer(buffer);
@@ -2997,9 +3012,10 @@ l1:
 	if (PageIsAllVisible(page))
 	{
 		all_visible_cleared = true;
+		LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
 		PageClearAllVisible(page);
 		visibilitymap_clear(relation->rd_locator, BufferGetBlockNumber(buffer),
-							vmbuffer, VISIBILITYMAP_VALID_BITS);
+							vmbuffer, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
 	}
 
 	/* store transaction information of xact deleting the tuple */
@@ -3090,13 +3106,23 @@ l1:
 		/* filtering by origin on a row level is much more efficient */
 		XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
 
+		if (all_visible_cleared)
+			XLogRegisterBuffer(1, vmbuffer, 0);
+
 		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
 
 		PageSetLSN(page, recptr);
+
+		if (all_visible_cleared)
+			PageSetLSN(BufferGetPage(vmbuffer), recptr);
 	}
 
 	END_CRIT_SECTION();
 
+	/* release VM lock first, since it covers many heap blocks */
+	if (all_visible_cleared)
+		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 	if (vmbuffer != InvalidBuffer)
@@ -3235,6 +3261,8 @@ heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
 	bool		key_intact;
 	bool		all_visible_cleared = false;
 	bool		all_visible_cleared_new = false;
+	bool		modified_vmbuffer = false;
+	bool		modified_vmbuffer_new = false;
 	bool		checked_lockers;
 	bool		locker_remains;
 	bool		id_has_external = false;
@@ -3343,8 +3371,11 @@ heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
 
 		UnlockReleaseBuffer(buffer);
 		Assert(!have_tuple_lock);
-		if (vmbuffer != InvalidBuffer)
+		if (BufferIsValid(vmbuffer))
+		{
 			ReleaseBuffer(vmbuffer);
+			vmbuffer = InvalidBuffer;
+		}
 		tmfd->ctid = *otid;
 		tmfd->xmax = InvalidTransactionId;
 		tmfd->cmax = InvalidCommandId;
@@ -3647,8 +3678,11 @@ l2:
 		UnlockReleaseBuffer(buffer);
 		if (have_tuple_lock)
 			UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
-		if (vmbuffer != InvalidBuffer)
+		if (BufferIsValid(vmbuffer))
+		{
 			ReleaseBuffer(vmbuffer);
+			vmbuffer = InvalidBuffer;
+		}
 		*update_indexes = TU_None;
 
 		bms_free(hot_attrs);
@@ -3831,10 +3865,15 @@ l2:
 		 * overhead would be unchanged, that doesn't seem necessarily
 		 * worthwhile.
 		 */
-		if (PageIsAllVisible(page) &&
-			visibilitymap_clear(relation->rd_locator, block, vmbuffer,
-								VISIBILITYMAP_ALL_FROZEN))
-			cleared_all_frozen = true;
+		if (PageIsAllVisible(page))
+		{
+			LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
+			if (visibilitymap_clear(relation->rd_locator, block, vmbuffer,
+									VISIBILITYMAP_ALL_FROZEN, InvalidXLogRecPtr))
+				cleared_all_frozen = true;
+			else
+				LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+		}
 
 		MarkBufferDirty(buffer);
 
@@ -3853,12 +3892,23 @@ l2:
 			xlrec.flags =
 				cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
 			XLogRegisterData(&xlrec, SizeOfHeapLock);
+
+			if (cleared_all_frozen)
+				XLogRegisterBuffer(1, vmbuffer, 0);
+
 			recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
 			PageSetLSN(page, recptr);
+
+			if (cleared_all_frozen)
+				PageSetLSN(BufferGetPage(vmbuffer), recptr);
 		}
 
 		END_CRIT_SECTION();
 
+		/* release VM lock first, since it covers many heap blocks */
+		if (cleared_all_frozen)
+			LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 		/*
@@ -4059,20 +4109,60 @@ l2:
 	/* record address of new tuple in t_ctid of old one */
 	oldtup.t_data->t_ctid = heaptup->t_self;
 
-	/* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
-	if (PageIsAllVisible(page))
+	/*
+	 * Clear PD_ALL_VISIBLE flags and reset visibility map bits for any heap
+	 * pages that were all-visible. If there are two heap pages, we may need
+	 * to clear VM bits for both.
+	 */
+	if (PageIsAllVisible(page) &&
+		newbuf != buffer && PageIsAllVisible(newpage) &&
+		vmbuffer_new == vmbuffer)
 	{
-		all_visible_cleared = true;
+		/*
+		 * This is the more complicated case: both the new and old heap pages
+		 * are all-visible and both their VM bits are on the same page of the
+		 * VM, so we register a single VM buffer as HEAP_UPDATE_BLKREF_VM_NEW
+		 * in the WAL record. We must be careful to only lock and register one
+		 * buffer, even though we modify it twice -- once for each heap
+		 * block's VM bits.
+		 */
 		PageClearAllVisible(page);
-		visibilitymap_clear(relation->rd_locator, BufferGetBlockNumber(buffer),
-							vmbuffer, VISIBILITYMAP_VALID_BITS);
-	}
-	if (newbuf != buffer && PageIsAllVisible(newpage))
-	{
-		all_visible_cleared_new = true;
 		PageClearAllVisible(newpage);
+		LockBuffer(vmbuffer_new, BUFFER_LOCK_EXCLUSIVE);
+		visibilitymap_clear(relation->rd_locator, block,
+							vmbuffer_new, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
 		visibilitymap_clear(relation->rd_locator, BufferGetBlockNumber(newbuf),
-							vmbuffer_new, VISIBILITYMAP_VALID_BITS);
+							vmbuffer_new, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
+		modified_vmbuffer_new = true;
+		all_visible_cleared = true;
+		all_visible_cleared_new = true;
+	}
+	else
+	{
+		/*
+		 * In all the remaining cases, we will clear at most one heap block's
+		 * VM bits per VM page, so we don't have to worry about locking or
+		 * registering the same buffer twice.
+		 */
+		if (PageIsAllVisible(page))
+		{
+			PageClearAllVisible(page);
+			LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
+			visibilitymap_clear(relation->rd_locator, block,
+								vmbuffer, VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
+			modified_vmbuffer = true;
+			all_visible_cleared = true;
+		}
+		if (newbuf != buffer && PageIsAllVisible(newpage))
+		{
+			PageClearAllVisible(newpage);
+			LockBuffer(vmbuffer_new, BUFFER_LOCK_EXCLUSIVE);
+			visibilitymap_clear(relation->rd_locator, BufferGetBlockNumber(newbuf),
+								vmbuffer_new,
+								VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
+			modified_vmbuffer_new = true;
+			all_visible_cleared_new = true;
+		}
 	}
 
 	if (newbuf != buffer)
@@ -4094,8 +4184,12 @@ l2:
 			log_heap_new_cid(relation, heaptup);
 		}
 
-		recptr = log_heap_update(relation, buffer,
-								 newbuf, &oldtup, heaptup,
+		recptr = log_heap_update(relation,
+								 buffer,
+								 modified_vmbuffer ? vmbuffer : InvalidBuffer,
+								 newbuf,
+								 modified_vmbuffer_new ? vmbuffer_new : InvalidBuffer,
+								 &oldtup, heaptup,
 								 old_key_tuple,
 								 all_visible_cleared,
 								 all_visible_cleared_new,
@@ -4105,10 +4199,20 @@ l2:
 			PageSetLSN(newpage, recptr);
 		}
 		PageSetLSN(page, recptr);
+
+		if (modified_vmbuffer)
+			PageSetLSN(BufferGetPage(vmbuffer), recptr);
+		if (modified_vmbuffer_new)
+			PageSetLSN(BufferGetPage(vmbuffer_new), recptr);
 	}
 
 	END_CRIT_SECTION();
 
+	if (modified_vmbuffer)
+		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+	if (modified_vmbuffer_new)
+		LockBuffer(vmbuffer_new, BUFFER_LOCK_UNLOCK);
+
 	if (newbuf != buffer)
 		LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@@ -5157,11 +5261,15 @@ failed:
 		tuple->t_data->t_ctid = *tid;
 
 	/* Clear only the all-frozen bit on visibility map if needed */
-	if (PageIsAllVisible(page) &&
-		visibilitymap_clear(relation->rd_locator, block, vmbuffer,
-							VISIBILITYMAP_ALL_FROZEN))
-		cleared_all_frozen = true;
-
+	if (PageIsAllVisible(page))
+	{
+		LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
+		if (visibilitymap_clear(relation->rd_locator, block, vmbuffer,
+								VISIBILITYMAP_ALL_FROZEN, InvalidXLogRecPtr))
+			cleared_all_frozen = true;
+		else
+			LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+	}
 
 	MarkBufferDirty(*buffer);
 
@@ -5192,15 +5300,25 @@ failed:
 		xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
 		XLogRegisterData(&xlrec, SizeOfHeapLock);
 
+		if (cleared_all_frozen)
+			XLogRegisterBuffer(1, vmbuffer, 0);
+
 		/* we don't decode row locks atm, so no need to log the origin */
 
 		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
 
 		PageSetLSN(page, recptr);
+
+		if (cleared_all_frozen)
+			PageSetLSN(BufferGetPage(vmbuffer), recptr);
 	}
 
 	END_CRIT_SECTION();
 
+	/* release VM lock first, since it covers many heap blocks */
+	if (cleared_all_frozen)
+		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
 	result = TM_Ok;
 
 out_locked:
@@ -5911,10 +6029,15 @@ l4:
 								  xid, mode, false,
 								  &new_xmax, &new_infomask, &new_infomask2);
 
-		if (PageIsAllVisible(BufferGetPage(buf)) &&
-			visibilitymap_clear(rel->rd_locator, block, vmbuffer,
-								VISIBILITYMAP_ALL_FROZEN))
-			cleared_all_frozen = true;
+		if (PageIsAllVisible(BufferGetPage(buf)))
+		{
+			LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
+			if (visibilitymap_clear(rel->rd_locator, block, vmbuffer,
+									VISIBILITYMAP_ALL_FROZEN, InvalidXLogRecPtr))
+				cleared_all_frozen = true;
+			else
+				LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+		}
 
 		START_CRIT_SECTION();
 
@@ -5945,13 +6068,23 @@ l4:
 
 			XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
 
+			if (cleared_all_frozen)
+				XLogRegisterBuffer(1, vmbuffer, 0);
+
 			recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
 
 			PageSetLSN(page, recptr);
+
+			if (cleared_all_frozen)
+				PageSetLSN(BufferGetPage(vmbuffer), recptr);
 		}
 
 		END_CRIT_SECTION();
 
+		/* release VM lock first, since it covers many heap blocks */
+		if (cleared_all_frozen)
+			LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
 next:
 		/* if we find the end of update chain, we're done. */
 		if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
@@ -8772,8 +8905,9 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
  * have modified the buffer(s) and marked them dirty.
  */
 static XLogRecPtr
-log_heap_update(Relation reln, Buffer oldbuf,
-				Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
+log_heap_update(Relation reln, Buffer oldbuf, Buffer vmbuffer_old,
+				Buffer newbuf, Buffer vmbuffer_new,
+				HeapTuple oldtup, HeapTuple newtup,
 				HeapTuple old_key_tuple,
 				bool all_visible_cleared, bool new_all_visible_cleared,
 				bool walLogical)
@@ -8901,9 +9035,9 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	if (need_tuple_data)
 		bufflags |= REGBUF_KEEP_DATA;
 
-	XLogRegisterBuffer(0, newbuf, bufflags);
+	XLogRegisterBuffer(HEAP_UPDATE_BLKREF_NEW, newbuf, bufflags);
 	if (oldbuf != newbuf)
-		XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
+		XLogRegisterBuffer(HEAP_UPDATE_BLKREF_OLD, oldbuf, REGBUF_STANDARD);
 
 	XLogRegisterData(&xlrec, SizeOfHeapUpdate);
 
@@ -8980,6 +9114,20 @@ log_heap_update(Relation reln, Buffer oldbuf,
 						 old_key_tuple->t_len - SizeofHeapTupleHeader);
 	}
 
+	/*
+	 * Register VM buffers. If the old and new heap pages' VM bits are on the
+	 * same VM page, the caller passes only vmbuffer_new (mirroring the heap
+	 * page convention where block 0 = new is always registered).
+	 */
+	Assert((BufferIsInvalid(vmbuffer_old) && BufferIsInvalid(vmbuffer_new)) ||
+		   (vmbuffer_old != vmbuffer_new));
+
+	if (BufferIsValid(vmbuffer_new))
+		XLogRegisterBuffer(HEAP_UPDATE_BLKREF_VM_NEW, vmbuffer_new, 0);
+
+	if (BufferIsValid(vmbuffer_old))
+		XLogRegisterBuffer(HEAP_UPDATE_BLKREF_VM_OLD, vmbuffer_old, 0);
+
 	/* filtering by origin on a row level is much more efficient */
 	XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
 
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c
index 9dbab095795..604a21d8f27 100644
--- a/src/backend/access/heap/heapam_xlog.c
+++ b/src/backend/access/heap/heapam_xlog.c
@@ -309,13 +309,14 @@ heap_xlog_delete(XLogReaderState *record)
 	 */
 	if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(target_locator);
 		Buffer		vmbuffer = InvalidBuffer;
 
-		visibilitymap_pin(reln, blkno, &vmbuffer);
-		visibilitymap_clear(target_locator, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
+		if (XLogReadBufferForRedo(record, 1, &vmbuffer) == BLK_NEEDS_REDO)
+			visibilitymap_clear(target_locator,
+								blkno, vmbuffer,
+								VISIBILITYMAP_VALID_BITS, lsn);
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
 	}
 
 	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@@ -396,13 +397,13 @@ heap_xlog_insert(XLogReaderState *record)
 	 */
 	if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(target_locator);
 		Buffer		vmbuffer = InvalidBuffer;
 
-		visibilitymap_pin(reln, blkno, &vmbuffer);
-		visibilitymap_clear(target_locator, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
+		if (XLogReadBufferForRedo(record, 1, &vmbuffer) == BLK_NEEDS_REDO)
+			visibilitymap_clear(target_locator, blkno, vmbuffer,
+								VISIBILITYMAP_VALID_BITS, lsn);
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
 	}
 
 	/*
@@ -523,18 +524,23 @@ heap_xlog_multi_insert(XLogReaderState *record)
 			 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
 
 	/*
-	 * The visibility map may need to be fixed even if the heap page is
-	 * already up-to-date.
+	 * If required, clear the VM first, to prevent all-visible temporarily
+	 * being set for a heap page that's not all visible anymore.
+	 *
+	 * If it were possible for XLH_INSERT_ALL_VISIBLE_CLEARED and
+	 * XLH_INSERT_ALL_FROZEN_SET to be present in the same record, doing the
+	 * XLogReadBufferForRedo() before the PageSetAllVisible() below would be a
+	 * problem, as it'd violate the rule that a heap page must never be set
+	 * all-visible in the VM while its PD_ALL_VISIBLE is clear.
 	 */
 	if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(rlocator);
-
-		visibilitymap_pin(reln, blkno, &vmbuffer);
-		visibilitymap_clear(rlocator, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
-		ReleaseBuffer(vmbuffer);
+		if (XLogReadBufferForRedo(record, 1, &vmbuffer) == BLK_NEEDS_REDO)
+			visibilitymap_clear(rlocator, blkno, vmbuffer,
+								VISIBILITYMAP_VALID_BITS, lsn);
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
 		vmbuffer = InvalidBuffer;
-		FreeFakeRelcacheEntry(reln);
 	}
 
 	if (isinit)
@@ -633,7 +639,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
 	buffer = InvalidBuffer;
 
 	/*
-	 * Read and update the visibility map (VM) block.
+	 * Read and update the visibility map (VM) block to set it frozen.
 	 *
 	 * We must always redo VM changes, even if the corresponding heap page
 	 * update was skipped due to the LSN interlock. Each VM block covers
@@ -706,6 +712,9 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 				nbuffer;
 	Page		opage,
 				npage;
+	bool		new_cleared,
+				old_cleared;
+	bool		same_vm_page;
 	OffsetNumber offnum;
 	ItemId		lp;
 	HeapTupleData oldtup;
@@ -739,19 +748,62 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 
 	ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
 
+	new_cleared = (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0;
+	old_cleared = (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0;
+
 	/*
-	 * The visibility map may need to be fixed even if the heap page is
-	 * already up-to-date.
+	 * If both the old and new heap pages were all-visible and their VM bits
+	 * are on the same VM page, that single VM page is registered as
+	 * HEAP_UPDATE_BLKREF_VM_NEW. Clear both heap blocks' VM bits from the
+	 * single provided VM buffer.
 	 */
-	if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
+	same_vm_page = new_cleared && old_cleared &&
+		!XLogRecHasBlockRef(record, HEAP_UPDATE_BLKREF_VM_OLD) &&
+		XLogRecHasBlockRef(record, HEAP_UPDATE_BLKREF_VM_NEW);
+
+	if (same_vm_page)
 	{
-		Relation	reln = CreateFakeRelcacheEntry(rlocator);
 		Buffer		vmbuffer = InvalidBuffer;
 
-		visibilitymap_pin(reln, oldblk, &vmbuffer);
-		visibilitymap_clear(rlocator, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
+		if (XLogReadBufferForRedo(record, HEAP_UPDATE_BLKREF_VM_NEW, &vmbuffer) ==
+			BLK_NEEDS_REDO)
+		{
+			visibilitymap_clear(rlocator, oldblk, vmbuffer,
+								VISIBILITYMAP_VALID_BITS, lsn);
+			visibilitymap_clear(rlocator, newblk, vmbuffer,
+								VISIBILITYMAP_VALID_BITS, lsn);
+		}
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
+	}
+	else
+	{
+		/*
+		 * We can be sure that we need to clear at most one heap page's VM
+		 * bits in each registered VM buffer.
+		 */
+		if (new_cleared)
+		{
+			Buffer		vmbuffer = InvalidBuffer;
+
+			if (XLogReadBufferForRedo(record, HEAP_UPDATE_BLKREF_VM_NEW, &vmbuffer) ==
+				BLK_NEEDS_REDO)
+				visibilitymap_clear(rlocator, newblk, vmbuffer,
+									VISIBILITYMAP_VALID_BITS, lsn);
+			if (BufferIsValid(vmbuffer))
+				UnlockReleaseBuffer(vmbuffer);
+		}
+		if (old_cleared)
+		{
+			Buffer		vmbuffer = InvalidBuffer;
+
+			if (XLogReadBufferForRedo(record, HEAP_UPDATE_BLKREF_VM_OLD, &vmbuffer) ==
+				BLK_NEEDS_REDO)
+				visibilitymap_clear(rlocator, oldblk, vmbuffer,
+									VISIBILITYMAP_VALID_BITS, lsn);
+			if (BufferIsValid(vmbuffer))
+				UnlockReleaseBuffer(vmbuffer);
+		}
 	}
 
 	/*
@@ -823,21 +875,6 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 	else
 		newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
 
-	/*
-	 * The visibility map may need to be fixed even if the heap page is
-	 * already up-to-date.
-	 */
-	if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
-	{
-		Relation	reln = CreateFakeRelcacheEntry(rlocator);
-		Buffer		vmbuffer = InvalidBuffer;
-
-		visibilitymap_pin(reln, newblk, &vmbuffer);
-		visibilitymap_clear(rlocator, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
-	}
-
 	/* Deal with new tuple */
 	if (newaction == BLK_NEEDS_REDO)
 	{
@@ -1031,16 +1068,14 @@ heap_xlog_lock(XLogReaderState *record)
 		RelFileLocator rlocator;
 		Buffer		vmbuffer = InvalidBuffer;
 		BlockNumber block;
-		Relation	reln;
 
 		XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
-		reln = CreateFakeRelcacheEntry(rlocator);
-
-		visibilitymap_pin(reln, block, &vmbuffer);
-		visibilitymap_clear(rlocator, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
 
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
+		if (XLogReadBufferForRedo(record, 1, &vmbuffer) == BLK_NEEDS_REDO)
+			visibilitymap_clear(rlocator, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN,
+								lsn);
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
 	}
 
 	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@@ -1107,16 +1142,14 @@ heap_xlog_lock_updated(XLogReaderState *record)
 		RelFileLocator rlocator;
 		Buffer		vmbuffer = InvalidBuffer;
 		BlockNumber block;
-		Relation	reln;
 
 		XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
-		reln = CreateFakeRelcacheEntry(rlocator);
-
-		visibilitymap_pin(reln, block, &vmbuffer);
-		visibilitymap_clear(rlocator, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
 
-		ReleaseBuffer(vmbuffer);
-		FreeFakeRelcacheEntry(reln);
+		if (XLogReadBufferForRedo(record, 1, &vmbuffer) == BLK_NEEDS_REDO)
+			visibilitymap_clear(rlocator, block, vmbuffer,
+								VISIBILITYMAP_ALL_FROZEN, lsn);
+		if (BufferIsValid(vmbuffer))
+			UnlockReleaseBuffer(vmbuffer);
 	}
 
 	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 184fb70a24f..a9feb16e3ec 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -927,9 +927,11 @@ heap_page_fix_vm_corruption(PruneState *prstate, OffsetNumber offnum,
 
 	if (do_clear_vm)
 	{
+		LockBuffer(prstate->vmbuffer, BUFFER_LOCK_EXCLUSIVE);
 		visibilitymap_clear(prstate->relation->rd_locator, prstate->block,
 							prstate->vmbuffer,
-							VISIBILITYMAP_VALID_BITS);
+							VISIBILITYMAP_VALID_BITS, InvalidXLogRecPtr);
+		LockBuffer(prstate->vmbuffer, BUFFER_LOCK_UNLOCK);
 		prstate->old_vmbits = 0;
 	}
 }
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 3b699291b23..03f12a4b180 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -139,22 +139,26 @@
 static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
 static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);
 
-
 /*
  *	visibilitymap_clear - clear specified bits for one page in visibility map
  *
- * You must pass a buffer containing the correct map page to this function.
- * Call visibilitymap_pin first to pin the right one. This function doesn't do
- * any I/O.  Returns true if any bits have been cleared and false otherwise.
+ * You must pass a buffer containing the correct map page to this function,
+ * which already needs to be pinned and locked exclusively.
+ *
+ * This function doesn't do any I/O. Returns true if any bits have been
+ * cleared and false otherwise.
  */
 bool
-visibilitymap_clear(RelFileLocator rlocator,
-					BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
+visibilitymap_clear(RelFileLocator rlocator, BlockNumber heapBlk,
+					Buffer vmbuf, uint8 flags, XLogRecPtr lsn)
 {
-	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	int			mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
 	int			mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
+#ifdef USE_ASSERT_CHECKING
+	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+#endif
 	uint8		mask = flags << mapOffset;
+	Page		page;
 	char	   *map;
 	bool		cleared = false;
 
@@ -168,11 +172,11 @@ visibilitymap_clear(RelFileLocator rlocator,
 		 heapBlk);
 #endif
 
-	if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
-		elog(ERROR, "wrong buffer passed to visibilitymap_clear");
+	Assert(BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock);
+	Assert(BufferIsLockedByMeInMode(vmbuf, BUFFER_LOCK_EXCLUSIVE));
 
-	LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
-	map = PageGetContents(BufferGetPage(vmbuf));
+	page = BufferGetPage(vmbuf);
+	map = PageGetContents(page);
 
 	if (map[mapByte] & mask)
 	{
@@ -182,7 +186,15 @@ visibilitymap_clear(RelFileLocator rlocator,
 		cleared = true;
 	}
 
-	LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
+	if (XLogRecPtrIsValid(lsn))
+	{
+		/*
+		 * Outside of recovery we won't have an LSN to stamp, as we make page
+		 * changes before emitting WAL
+		 */
+		Assert(InRecovery);
+		PageSetLSN(page, lsn);
+	}
 
 	return cleared;
 }
diff --git a/src/bin/pg_walsummary/t/002_blocks.pl b/src/bin/pg_walsummary/t/002_blocks.pl
index f5fe94f9f15..3fcf6c1041c 100644
--- a/src/bin/pg_walsummary/t/002_blocks.pl
+++ b/src/bin/pg_walsummary/t/002_blocks.pl
@@ -93,13 +93,14 @@ my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
   split(m@/@, $end_lsn);
 ok(-f $filename, "WAL summary file exists");
 
-# Run pg_walsummary on it. We expect exactly two blocks to be modified,
-# block 0 and one other.
+# Run pg_walsummary on it. We expect exactly three blocks to be modified,
+# block 0 (old tuple), another block (new tuple) and the block for the VM.
 my ($stdout, $stderr) = run_command([ 'pg_walsummary', '-i', $filename ]);
 note($stdout);
 @lines = split(/\n/, $stdout);
 like($stdout, qr/FORK main: block 0$/m, "stdout shows block 0 modified");
+like($stdout, qr/FORK vm: block 0$/m, "stdout shows VM block 0 modified");
 is($stderr, '', 'stderr is empty');
-is(0 + @lines, 2, "UPDATE modified 2 blocks");
+is(0 + @lines, 3, "UPDATE modified 3 blocks");
 
 done_testing();
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index fdca7d821c8..4ce028f756d 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -216,7 +216,22 @@ typedef struct xl_multi_insert_tuple
  * included even if a full-page image was taken.
  *
  * Backup blk 1: old page, if different. (no data, just a reference to the blk)
+ *
+ * Backup blk 2: VM page covering the new heap page. Registered whenever
+ *               XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED is set. Also covers the old
+ *               heap page's VM bits when both heap pages map to the same VM
+ *               page.
+ *
+ * Backup blk 3: VM page covering the old heap page. Only registered when
+ *               XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is set and the old heap
+ *               page's VM bits are on a different VM page from the new heap
+ *               page's.
  */
+#define HEAP_UPDATE_BLKREF_NEW		0
+#define HEAP_UPDATE_BLKREF_OLD		1
+#define HEAP_UPDATE_BLKREF_VM_NEW	2
+#define HEAP_UPDATE_BLKREF_VM_OLD	3
+
 typedef struct xl_heap_update
 {
 	TransactionId old_xmax;		/* xmax of the old tuple */
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
index b860c4ef3ca..f183c11a296 100644
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -15,6 +15,7 @@
 #define VISIBILITYMAP_H
 
 #include "access/visibilitymapdefs.h"
+#include "access/xlogdefs.h"
 #include "storage/block.h"
 #include "storage/buf.h"
 #include "storage/relfilelocator.h"
@@ -27,7 +28,8 @@
 	((visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_FROZEN) != 0)
 
 extern bool visibilitymap_clear(RelFileLocator rlocator, BlockNumber heapBlk,
-								Buffer vmbuf, uint8 flags);
+								Buffer vmbuf, uint8 flags, XLogRecPtr lsn);
+
 extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk,
 							  Buffer *vmbuf);
 extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 13ae3ad4fbb..55663e6f4af 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -32,7 +32,7 @@
 /*
  * Each page of XLOG file has a header like this:
  */
-#define XLOG_PAGE_MAGIC 0xD11F	/* can be used as WAL version indicator */
+#define XLOG_PAGE_MAGIC 0xD120	/* can be used as WAL version indicator */
 
 typedef struct XLogPageHeaderData
 {
-- 
2.43.0

