diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index dc762f9..42eac78 100644
*** a/src/backend/access/heap/heapam.c
--- b/src/backend/access/heap/heapam.c
*************** heap_lock_updated_tuple_rec(Relation rel
*** 5677,5682 ****
--- 5677,5683 ----
  				new_xmax;
  	TransactionId priorXmax = InvalidTransactionId;
  	bool		cleared_all_frozen = false;
+ 	bool		pinned_desired_page;
  	Buffer		vmbuffer = InvalidBuffer;
  	BlockNumber block;
  
*************** heap_lock_updated_tuple_rec(Relation rel
*** 5698,5704 ****
  			 * chain, and there's no further tuple to lock: return success to
  			 * caller.
  			 */
! 			return HeapTupleMayBeUpdated;
  		}
  
  l4:
--- 5699,5706 ----
  			 * chain, and there's no further tuple to lock: return success to
  			 * caller.
  			 */
! 			result = HeapTupleMayBeUpdated;
! 			goto out_unlocked;
  		}
  
  l4:
*************** l4:
*** 5710,5719 ****
  		 * someone else might be in the middle of changing this, so we'll need
  		 * to recheck after we have the lock.
  		 */
! 		if (PageIsAllVisible(BufferGetPage(buf)))
  			visibilitymap_pin(rel, block, &vmbuffer);
  		else
! 			vmbuffer = InvalidBuffer;
  
  		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
  
--- 5712,5724 ----
  		 * someone else might be in the middle of changing this, so we'll need
  		 * to recheck after we have the lock.
  		 */
! 		if (PageIsAllVisible(BufferGetPage(buf)))
! 		{
  			visibilitymap_pin(rel, block, &vmbuffer);
+ 			pinned_desired_page = true;
+ 		}
  		else
! 			pinned_desired_page = false;
  
  		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
  
*************** l4:
*** 5722,5729 ****
  		 * all visible while we were busy locking the buffer, we'll have to
  		 * unlock and re-lock, to avoid holding the buffer lock across I/O.
  		 * That's a bit unfortunate, but hopefully shouldn't happen often.
  		 */
! 		if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
  		{
  			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
  			visibilitymap_pin(rel, block, &vmbuffer);
--- 5727,5739 ----
  		 * all visible while we were busy locking the buffer, we'll have to
  		 * unlock and re-lock, to avoid holding the buffer lock across I/O.
  		 * That's a bit unfortunate, but hopefully shouldn't happen often.
+ 		 *
+ 		 * Note: in some paths through this function, we will reach here
+ 		 * holding a pin on a vm page that may or may not be the one matching
+ 		 * this page.  If this page isn't all-visible, we won't use the vm
+ 		 * page, but we hold onto such a pin till the end of the function.
  		 */
! 		if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
  		{
  			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
  			visibilitymap_pin(rel, block, &vmbuffer);
*************** l4:
*** 5749,5756 ****
  		 */
  		if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
  		{
! 			UnlockReleaseBuffer(buf);
! 			return HeapTupleMayBeUpdated;
  		}
  
  		old_infomask = mytup.t_data->t_infomask;
--- 5759,5766 ----
  		 */
  		if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
  		{
! 			result = HeapTupleMayBeUpdated;
! 			goto out_locked;
  		}
  
  		old_infomask = mytup.t_data->t_infomask;
*************** next:
*** 5957,5964 ****
  		priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
  		ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
  		UnlockReleaseBuffer(buf);
- 		if (vmbuffer != InvalidBuffer)
- 			ReleaseBuffer(vmbuffer);
  	}
  
  	result = HeapTupleMayBeUpdated;
--- 5967,5972 ----
*************** next:
*** 5966,5976 ****
  out_locked:
  	UnlockReleaseBuffer(buf);
  
  	if (vmbuffer != InvalidBuffer)
  		ReleaseBuffer(vmbuffer);
  
  	return result;
- 
  }
  
  /*
--- 5974,5984 ----
  out_locked:
  	UnlockReleaseBuffer(buf);
  
+ out_unlocked:
  	if (vmbuffer != InvalidBuffer)
  		ReleaseBuffer(vmbuffer);
  
  	return result;
  }
  
  /*

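For reviewers, a minimal standalone sketch of the pin-before-lock dance in the
PageIsAllVisible hunks above. All names here (lock_with_vm_pin, vm_pin,
page_is_all_visible, and so on) are invented stand-ins, not PostgreSQL APIs.
The point it models: pin the vm page while the heap buffer is still unlocked
(so possible I/O never happens under the lock), recheck under the lock, and
note that pinned_desired_page, not vmbuffer validity, records whether the pin
matches the current page, since a stale vm pin from an earlier page may still
be held.

	/* Illustrative sketch only; not PostgreSQL code. */
	#include <stdbool.h>
	#include <stdio.h>

	typedef int Buffer;
	#define InvalidBuffer (-1)

	static bool all_visible = false;	/* flips under the lock to simulate a race */

	static bool page_is_all_visible(Buffer buf) { (void) buf; return all_visible; }
	static void vm_pin(int block, Buffer *vm)   { *vm = 200 + block; }	/* may do I/O */
	static void lock_buffer(Buffer buf)
	{
		printf("lock %d\n", buf);
		all_visible = true;		/* page became all-visible while we locked */
	}
	static void unlock_buffer(Buffer buf)       { printf("unlock %d\n", buf); }

	static void
	lock_with_vm_pin(Buffer buf, int block, Buffer *vmbuffer,
					 bool *pinned_desired_page)
	{
		/* cheap check while unlocked; a concurrent change can race with us */
		if (page_is_all_visible(buf))
		{
			vm_pin(block, vmbuffer);	/* I/O is safe: no lock held yet */
			*pinned_desired_page = true;
		}
		else
			*pinned_desired_page = false;

		lock_buffer(buf);

		/* recheck under the lock; re-pin without holding the lock across I/O */
		if (!*pinned_desired_page && page_is_all_visible(buf))
		{
			unlock_buffer(buf);
			vm_pin(block, vmbuffer);
			lock_buffer(buf);
		}
	}

	int
	main(void)
	{
		Buffer		vmbuffer = InvalidBuffer;
		bool		pinned;

		lock_with_vm_pin(1, 1, &vmbuffer, &pinned);
		printf("vmbuffer=%d pinned_desired_page=%d\n", vmbuffer, (int) pinned);
		return 0;
	}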
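A second sketch, of the out_locked/out_unlocked exit structure the patch
introduces: since a vm pin can now be carried across loop iterations (the
per-iteration ReleaseBuffer(vmbuffer) is removed above), every exit path,
whether or not the buffer lock is still held, funnels through one of two
labels that release the pin exactly once. Again, all names are invented
stand-ins; in the real code visibilitymap_pin itself releases a mismatched
old pin before reading the right vm page, which is why carrying the pin
between iterations is safe.

	/* Illustrative sketch only; not PostgreSQL code. */
	#include <stdbool.h>
	#include <stdio.h>

	typedef int Buffer;
	#define InvalidBuffer (-1)

	/* like heap_fetch failing at the end of the chain: no pin taken */
	static bool fetch_page(int i, Buffer *buf)
	{
		if (i >= 2)
			return false;
		*buf = 100 + i;
		return true;
	}

	static void unlock_release(Buffer b) { printf("unlock+release %d\n", b); }
	static void release_pin(Buffer b)    { printf("release vm pin %d\n", b); }

	static int
	walk_chain_sketch(bool abort_midway)
	{
		Buffer		vmbuffer = InvalidBuffer;
		Buffer		buf = InvalidBuffer;
		int			result;

		for (int i = 0;; i++)
		{
			if (!fetch_page(i, &buf))
			{
				/* success, but a vm pin from an earlier iteration may remain */
				result = 0;
				goto out_unlocked;
			}

			if (vmbuffer == InvalidBuffer)
				vmbuffer = 200 + i;	/* stand-in for visibilitymap_pin() */

			if (abort_midway && i == 1)
			{
				result = 1;
				goto out_locked;	/* buf is still pinned and locked here */
			}

			unlock_release(buf);	/* normal step to the next chain member */
		}

	out_locked:
		unlock_release(buf);

	out_unlocked:
		if (vmbuffer != InvalidBuffer)
			release_pin(vmbuffer);

		return result;
	}

	int
	main(void)
	{
		walk_chain_sketch(false);
		walk_chain_sketch(true);
		return 0;
	}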