From 22d9613accc70eb2f9e799b87e976d64540f36b4 Mon Sep 17 00:00:00 2001
From: Yura Sokolov <y.sokolov@postgrespro.ru>
Date: Mon, 28 Feb 2022 12:19:17 +0300
Subject: [PATCH v4 2/2] Add HASH_REUSE+HASH_ASSIGN and use it in BufTable.

Avoid dynahash's freelist locking when BufferAlloc reuses a buffer for a
different tag.

HASH_REUSE acts like HASH_REMOVE, but stores the removed element in a
static variable instead of returning it to the freelist partition.
HASH_ASSIGN then acts like HASH_ENTER, but consumes the stored element
instead of taking one from the freelist.

Unfortunately, FreeListData->nentries would still have to be updated even
in that case.  Therefore, instead of tracking nentries, we track nfree
(the actual length of the freelist) and nalloced (the total number of
entries allocated for the freelist); the number of live entries is then
nalloced - nfree.  This was suggested by Robert Haas in
https://postgr.es/m/CA%2BTgmoZkg-04rcNRURt%3DjAG0Cs5oPyB-qKxH4wqX09e-oXy-nw%40mail.gmail.com
---
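Notes for reviewers (not part of the commit message): below is a minimal
sketch of the intended calling pattern, assuming the patched BufTable API.
Partition locking and buffer-header handling are elided, and
remap_buffer_sketch() is a hypothetical illustration, not code from the
patch.

	/*
	 * Sketch: remapping a buffer from oldTag to newTag.  BufTableDelete()
	 * with reuse=true does HASH_REUSE, stashing the removed element in
	 * DynaHashReuse instead of pushing it onto a freelist; the immediately
	 * following BufTableInsert() does HASH_ASSIGN and consumes it, so no
	 * freelist lock is taken on this path.
	 */
	static void
	remap_buffer_sketch(BufferTag *oldTag, BufferTag *newTag, int buf_id)
	{
		uint32		oldHash = BufTableHashCode(oldTag);
		uint32		newHash = BufTableHashCode(newTag);

		/* ... both partition locks are held exclusively here ... */

		BufTableDelete(oldTag, oldHash, true);	/* HASH_REUSE */
		(void) BufTableInsert(newTag, newHash, buf_id); /* HASH_ASSIGN */

		/* ... release the partition locks ... */
	}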
 src/backend/storage/buffer/buf_table.c |   9 +-
 src/backend/storage/buffer/bufmgr.c    |   4 +-
 src/backend/utils/hash/dynahash.c      | 130 ++++++++++++++++++++-----
 src/include/storage/buf_internals.h    |   2 +-
 src/include/utils/hsearch.h            |   4 +-
 5 files changed, 120 insertions(+), 29 deletions(-)
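
A note on entry accounting: after this patch each freelist maintains the
invariant "live entries == nalloced - nfree", which is what
hash_get_num_entries() now sums over the freelists.  Condensed from the
dynahash.c changes below, with a hypothetical freelist pointer standing
in for hctl->freeList[freelist_idx]:

	/* element_alloc(): carving out nelem new entries grows both counters */
	freelist->nalloced += nelem;
	freelist->nfree += nelem;		/* every new entry starts out free */

	/* get_hash_entry(): handing out an entry decrements nfree only */
	freelist->nfree--;

	/* HASH_REMOVE: returning an entry increments nfree only */
	freelist->nfree++;

	/*
	 * HASH_REUSE + HASH_ASSIGN: the entry bypasses the freelist entirely,
	 * so neither counter changes and no freelist mutex is needed.
	 */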

diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index caa03ae1233..3362c7127e9 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -128,7 +128,7 @@ BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
 		hash_search_with_hash_value(SharedBufHash,
 									(void *) tagPtr,
 									hashcode,
-									HASH_ENTER,
+									HASH_ASSIGN,
 									&found);
 
 	if (found)					/* found something already in the table */
@@ -143,10 +143,13 @@ BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
  * BufTableDelete
  *		Delete the hashtable entry for given tag (which must exist)
  *
+ * If the reuse flag is true, the deleted entry is cached for reuse, and
+ * the caller must call BufTableInsert next.
+ *
  * Caller must hold exclusive lock on BufMappingLock for tag's partition
  */
 void
-BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
+BufTableDelete(BufferTag *tagPtr, uint32 hashcode, bool reuse)
 {
 	BufferLookupEnt *result;
 
@@ -154,7 +157,7 @@ BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
 		hash_search_with_hash_value(SharedBufHash,
 									(void *) tagPtr,
 									hashcode,
-									HASH_REMOVE,
+									reuse ? HASH_REUSE : HASH_REMOVE,
 									NULL);
 
 	if (!result)				/* shouldn't happen */
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 5d2781f4813..85b62463c0d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1334,7 +1334,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 
 	/* Delete old tag from hash table if it were valid. */
 	if (oldFlags & BM_TAG_VALID)
-		BufTableDelete(&oldTag, oldHash);
+		BufTableDelete(&oldTag, oldHash, true);
 
 	if (oldPartitionLock != newPartitionLock)
 	{
@@ -1528,7 +1528,7 @@ retry:
 	 * Remove the buffer from the lookup hashtable, if it was in there.
 	 */
 	if (oldFlags & BM_TAG_VALID)
-		BufTableDelete(&oldTag, oldHash);
+		BufTableDelete(&oldTag, oldHash, false);
 
 	/*
 	 * Done with mapping lock.
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 6546e3c7c79..9eb07593da7 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -14,7 +14,7 @@
  * a hash table in partitioned mode, the HASH_PARTITION flag must be given
  * to hash_create.  This prevents any attempt to split buckets on-the-fly.
  * Therefore, each hash bucket chain operates independently, and no fields
- * of the hash header change after init except nentries and freeList.
+ * of the hash header change after init except nfree and freeList.
  * (A partitioned table uses multiple copies of those fields, guarded by
  * spinlocks, for additional concurrency.)
  * This lets any subset of the hash buckets be treated as a separately
@@ -138,8 +138,9 @@ typedef HASHBUCKET *HASHSEGMENT;
  *
  * In a partitioned hash table, each freelist is associated with a specific
  * set of hashcodes, as determined by the FREELIST_IDX() macro below.
- * nentries tracks the number of live hashtable entries having those hashcodes
- * (NOT the number of entries in the freelist, as you might expect).
+ * nalloced tracks the total number of hashtable entries allocated for the
+ * freelist; nfree tracks how many of those entries are currently free.
+ * The number of live entries is therefore nalloced - nfree.
  *
  * The coverage of a freelist might be more or less than one partition, so it
  * needs its own lock rather than relying on caller locking.  Relying on that
@@ -147,13 +148,15 @@ typedef HASHBUCKET *HASHSEGMENT;
  * need to "borrow" entries from another freelist; see get_hash_entry().
  *
  * Using an array of FreeListData instead of separate arrays of mutexes,
- * nentries and freeLists helps to reduce sharing of cache lines between
+ * nfree and freeLists helps to reduce sharing of cache lines between
  * different mutexes.
  */
 typedef struct
 {
 	slock_t		mutex;			/* spinlock for this freelist */
-	long		nentries;		/* number of entries in associated buckets */
+	long		nfree;			/* number of free entries in the list */
+	long		nalloced;		/* total number of entries allocated for
+								 * the list */
 	HASHELEMENT *freeList;		/* chain of free elements */
 } FreeListData;
 
@@ -170,7 +173,7 @@ struct HASHHDR
 	/*
 	 * The freelist can become a point of contention in high-concurrency hash
 	 * tables, so we use an array of freelists, each with its own mutex and
-	 * nentries count, instead of just a single one.  Although the freelists
+	 * nfree count, instead of just a single one.  Although the freelists
 	 * normally operate independently, we will scavenge entries from freelists
 	 * other than a hashcode's default freelist when necessary.
 	 *
@@ -254,6 +257,15 @@ struct HTAB
  */
 #define MOD(x,y)			   ((x) & ((y)-1))
 
+/*
+ * Struct for the element stashed by HASH_REUSE until HASH_ASSIGN uses it.
+ */
+struct HASHREUSE
+{
+	HTAB	   *hashp;
+	HASHBUCKET	element;
+};
+
 #ifdef HASH_STATISTICS
 static long hash_accesses,
 			hash_collisions,
@@ -293,6 +305,12 @@ DynaHashAlloc(Size size)
 }
 
 
+/*
+ * Support for HASH_REUSE + HASH_ASSIGN
+ */
+static struct HASHREUSE DynaHashReuse = {NULL, NULL};
+
+
 /*
  * HashCompareFunc for string keys
  *
@@ -932,6 +950,10 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
  *		HASH_ENTER: look up key in table, creating entry if not present
  *		HASH_ENTER_NULL: same, but return NULL if out of memory
  *		HASH_REMOVE: look up key in table, remove entry if present
+ *		HASH_REUSE: same as HASH_REMOVE, but stores the removed element in
+ *					a static variable instead of the free list
+ *		HASH_ASSIGN: same as HASH_ENTER, but reuses the element stored by
+ *					HASH_REUSE, if any
  *
  * Return value is a pointer to the element found/entered/removed if any,
  * or NULL if no match was found.  (NB: in the case of the REMOVE action,
@@ -1000,7 +1022,8 @@ hash_search_with_hash_value(HTAB *hashp,
 		 * Can't split if running in partitioned mode, nor if frozen, nor if
 		 * table is the subject of any active hash_seq_search scans.
 		 */
-		if (hctl->freeList[0].nentries > (long) hctl->max_bucket &&
+		if (hctl->freeList[0].nfree == 0 &&
+			hctl->freeList[0].nalloced > (long) hctl->max_bucket &&
 			!IS_PARTITIONED(hctl) && !hashp->frozen &&
 			!has_seq_scans(hashp))
 			(void) expand_table(hashp);
@@ -1044,6 +1067,10 @@ hash_search_with_hash_value(HTAB *hashp,
 	if (foundPtr)
 		*foundPtr = (bool) (currBucket != NULL);
 
+	/* If HASH_REUSE was not called, HASH_ASSIGN falls back to HASH_ENTER */
+	if (action == HASH_ASSIGN && DynaHashReuse.element == NULL)
+		action = HASH_ENTER;
+
 	/*
 	 * OK, now what?
 	 */
@@ -1057,20 +1084,17 @@ hash_search_with_hash_value(HTAB *hashp,
 		case HASH_REMOVE:
 			if (currBucket != NULL)
 			{
-				/* if partitioned, must lock to touch nentries and freeList */
+				/* if partitioned, must lock to touch nfree and freeList */
 				if (IS_PARTITIONED(hctl))
 					SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
 
-				/* delete the record from the appropriate nentries counter. */
-				Assert(hctl->freeList[freelist_idx].nentries > 0);
-				hctl->freeList[freelist_idx].nentries--;
-
 				/* remove record from hash bucket's chain. */
 				*prevBucketPtr = currBucket->link;
 
 				/* add the record to the appropriate freelist. */
 				currBucket->link = hctl->freeList[freelist_idx].freeList;
 				hctl->freeList[freelist_idx].freeList = currBucket;
+				hctl->freeList[freelist_idx].nfree++;
 
 				if (IS_PARTITIONED(hctl))
 					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
@@ -1116,6 +1140,7 @@ hash_search_with_hash_value(HTAB *hashp,
 							 errmsg("out of memory")));
 			}
 
+	link_element:
 			/* link into hashbucket chain */
 			*prevBucketPtr = currBucket;
 			currBucket->link = NULL;
@@ -1132,6 +1157,63 @@ hash_search_with_hash_value(HTAB *hashp,
 			 */
 
 			return (void *) ELEMENTKEY(currBucket);
+
+		case HASH_REUSE:
+			if (currBucket != NULL)
+			{
+				/* check there is no unfinished HASH_REUSE+HASH_ASSIGN pair */
+				Assert(DynaHashReuse.hashp == NULL);
+				Assert(DynaHashReuse.element == NULL);
+
+				/* remove record from hash bucket's chain. */
+				*prevBucketPtr = currBucket->link;
+
+				/* and store for HASH_ASSIGN */
+				DynaHashReuse.element = currBucket;
+				DynaHashReuse.hashp = hashp;
+
+				/* Caller must perform HASH_ASSIGN as the very next step. */
+				return (void *) ELEMENTKEY(currBucket);
+			}
+			return NULL;
+
+		case HASH_ASSIGN:
+			/* check that HASH_REUSE was called for the same hash table */
+			Assert(DynaHashReuse.hashp == hashp);
+
+			/*
+			 * If an existing element was found, the reuse element must be
+			 * returned to a freelist.  It doesn't matter much which one,
+			 * since get_hash_entry() can borrow elements across freelists.
+			 */
+			if (currBucket != NULL)
+			{
+
+				/* if partitioned, must lock to touch nfree and freeList */
+				if (IS_PARTITIONED(hctl))
+					SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
+
+				/* add the record to the appropriate freelist. */
+				DynaHashReuse.element->link = hctl->freeList[freelist_idx].freeList;
+				hctl->freeList[freelist_idx].freeList = DynaHashReuse.element;
+				hctl->freeList[freelist_idx].nfree++;
+
+				if (IS_PARTITIONED(hctl))
+					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
+
+				DynaHashReuse.element = NULL;
+				DynaHashReuse.hashp = NULL;
+
+				return (void *) ELEMENTKEY(currBucket);
+			}
+
+			currBucket = DynaHashReuse.element;
+
+			DynaHashReuse.element = NULL;
+			DynaHashReuse.hashp = NULL;
+
+			/* reuse HASH_ENTER code */
+			goto link_element;
 	}
 
 	elog(ERROR, "unrecognized hash action code: %d", (int) action);
@@ -1301,7 +1383,7 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
 
 	for (;;)
 	{
-		/* if partitioned, must lock to touch nentries and freeList */
+		/* if partitioned, must lock to touch nfree and freeList */
 		if (IS_PARTITIONED(hctl))
 			SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
 
@@ -1347,13 +1429,9 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
 				if (newElement != NULL)
 				{
 					hctl->freeList[borrow_from_idx].freeList = newElement->link;
+					hctl->freeList[borrow_from_idx].nfree--;
 					SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
 
-					/* careful: count the new element in its proper freelist */
-					SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
-					hctl->freeList[freelist_idx].nentries++;
-					SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
-
 					return newElement;
 				}
 
@@ -1365,9 +1443,9 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
 		}
 	}
 
-	/* remove entry from freelist, bump nentries */
+	/* remove entry from freelist, decrease nfree */
 	hctl->freeList[freelist_idx].freeList = newElement->link;
-	hctl->freeList[freelist_idx].nentries++;
+	hctl->freeList[freelist_idx].nfree--;
 
 	if (IS_PARTITIONED(hctl))
 		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
@@ -1382,7 +1460,10 @@ long
 hash_get_num_entries(HTAB *hashp)
 {
 	int			i;
-	long		sum = hashp->hctl->freeList[0].nentries;
+	long		sum = 0;
+
+	sum += hashp->hctl->freeList[0].nalloced;
+	sum -= hashp->hctl->freeList[0].nfree;
 
 	/*
 	 * We currently don't bother with acquiring the mutexes; it's only
@@ -1392,7 +1473,10 @@ hash_get_num_entries(HTAB *hashp)
 	if (IS_PARTITIONED(hashp->hctl))
 	{
 		for (i = 1; i < NUM_FREELISTS; i++)
-			sum += hashp->hctl->freeList[i].nentries;
+		{
+			sum += hashp->hctl->freeList[i].nalloced;
+			sum -= hashp->hctl->freeList[i].nfree;
+		}
 	}
 
 	return sum;
@@ -1739,6 +1823,8 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
 	/* freelist could be nonempty if two backends did this concurrently */
 	firstElement->link = hctl->freeList[freelist_idx].freeList;
 	hctl->freeList[freelist_idx].freeList = prevElement;
+	hctl->freeList[freelist_idx].nfree += nelem;
+	hctl->freeList[freelist_idx].nalloced += nelem;
 
 	if (IS_PARTITIONED(hctl))
 		SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 7c6653311a5..d35ee1b4108 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -328,7 +328,7 @@ extern void InitBufTable(int size);
 extern uint32 BufTableHashCode(BufferTag *tagPtr);
 extern int	BufTableLookup(BufferTag *tagPtr, uint32 hashcode);
 extern int	BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id);
-extern void BufTableDelete(BufferTag *tagPtr, uint32 hashcode);
+extern void BufTableDelete(BufferTag *tagPtr, uint32 hashcode, bool reuse);
 
 /* localbuf.c */
 extern PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr,
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index d7af0239c8c..ba21c5a65a1 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -113,7 +113,9 @@ typedef enum
 	HASH_FIND,
 	HASH_ENTER,
 	HASH_REMOVE,
-	HASH_ENTER_NULL
+	HASH_ENTER_NULL,
+	HASH_REUSE,
+	HASH_ASSIGN
 } HASHACTION;
 
 /* hash_seq status (should be considered an opaque type by callers) */
-- 
2.35.1

