From 1de9607c1b05fa916f9cd5910b1dbc8f8cb6df70 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <postgres@jeltef.nl>
Date: Thu, 4 Dec 2025 15:36:19 +0100
Subject: [PATCH v13 3/5] Use hash_make macros throughout the codebase

This shows how our code base looks when using the new APIs. This has
some type-safety, readability and maintainability benefits, but it also
introduces some backpatching problems. These backpatching problems
cannot be resolved by backporting the new hash_make macros, because some
of them require C11 (which we only require on master for now). I think
it's unlikely that we'll need to backpatch things in code that creates
hashtables though, so it could still be worth it to do this complete
refactor.

At the very least we should apply the new macros in a few chosen places
to make sure they have test coverage.

Do note that these new macros are not just a quality-of-life
improvement: they also calculate the key and entry sizes automatically,
making them less error-prone to maintain in case of changes to the
underlying structs.
---
 contrib/dblink/dblink.c                       | 10 +--
 .../pg_stat_statements/pg_stat_statements.c   | 10 +--
 contrib/pg_trgm/trgm_regexp.c                 |  9 +--
 contrib/postgres_fdw/connection.c             | 10 +--
 contrib/postgres_fdw/shippable.c              | 10 ++-
 contrib/tablefunc/tablefunc.c                 | 17 ++---
 src/backend/access/common/heaptuple.c         | 13 +---
 src/backend/access/gist/gistbuild.c           | 11 +---
 src/backend/access/gist/gistbuildbuffers.c    | 10 +--
 src/backend/access/hash/hashpage.c            | 13 +---
 src/backend/access/heap/rewriteheap.c         | 39 ++++-------
 src/backend/access/transam/xlogprefetcher.c   |  8 +--
 src/backend/access/transam/xlogutils.c        | 13 ++--
 src/backend/catalog/pg_enum.c                 | 24 ++-----
 src/backend/catalog/pg_inherits.c             | 11 +---
 src/backend/catalog/storage.c                 | 21 ++----
 src/backend/commands/async.c                  | 49 ++++----------
 src/backend/commands/prepare.c                | 12 +---
 src/backend/commands/sequence.c               | 10 +--
 src/backend/commands/tablecmds.c              | 16 +----
 src/backend/executor/nodeModifyTable.c        | 10 +--
 src/backend/nodes/extensible.c                | 10 +--
 src/backend/optimizer/util/plancat.c          | 17 ++---
 src/backend/optimizer/util/predtest.c         |  9 +--
 src/backend/optimizer/util/relnode.c          | 13 +---
 src/backend/parser/parse_oper.c               | 10 ++-
 src/backend/partitioning/partdesc.c           |  9 +--
 src/backend/postmaster/autovacuum.c           | 22 ++-----
 src/backend/postmaster/checkpointer.c         | 12 +---
 .../replication/logical/applyparallelworker.c | 13 +---
 src/backend/replication/logical/relation.c    | 22 ++-----
 .../replication/logical/reorderbuffer.c       | 29 ++------
 src/backend/replication/logical/tablesync.c   | 10 ++-
 src/backend/replication/pgoutput/pgoutput.c   | 11 +---
 src/backend/storage/buffer/buf_table.c        | 15 ++---
 src/backend/storage/buffer/localbuf.c         | 11 +---
 src/backend/storage/file/reinit.c             |  8 +--
 src/backend/storage/ipc/shmem.c               | 26 +++++++-
 src/backend/storage/ipc/standby.c             | 20 ++----
 src/backend/storage/lmgr/lock.c               | 55 +++++-----------
 src/backend/storage/lmgr/lwlock.c             |  8 +--
 src/backend/storage/lmgr/predicate.c          | 66 ++++++++-----------
 src/backend/storage/smgr/smgr.c               | 10 ++-
 src/backend/storage/sync/sync.c               | 11 +---
 src/backend/tsearch/ts_typanalyze.c           | 13 +---
 src/backend/utils/activity/wait_event.c       | 23 +++----
 src/backend/utils/adt/array_typanalyze.c      | 23 ++-----
 src/backend/utils/adt/json.c                  | 15 +----
 src/backend/utils/adt/jsonfuncs.c             | 20 ++----
 src/backend/utils/adt/mcxtfuncs.c             | 11 +---
 src/backend/utils/adt/ri_triggers.c           | 29 ++++----
 src/backend/utils/adt/ruleutils.c             | 23 ++-----
 src/backend/utils/cache/attoptcache.c         | 16 ++---
 src/backend/utils/cache/evtcache.c            |  9 +--
 src/backend/utils/cache/funccache.c           | 14 ++--
 src/backend/utils/cache/relcache.c            | 19 ++----
 src/backend/utils/cache/relfilenumbermap.c    |  9 +--
 src/backend/utils/cache/spccache.c            |  9 +--
 src/backend/utils/cache/ts_cache.c            | 27 +++-----
 src/backend/utils/cache/typcache.c            | 33 ++++------
 src/backend/utils/fmgr/dfmgr.c                | 12 ++--
 src/backend/utils/fmgr/fmgr.c                 | 11 +---
 src/backend/utils/misc/guc.c                  | 14 ++--
 src/backend/utils/misc/injection_point.c      | 14 ++--
 src/backend/utils/mmgr/portalmem.c            | 10 +--
 src/backend/utils/time/combocid.c             | 13 +---
 src/pl/plperl/plperl.c                        | 32 +++------
 src/pl/plpgsql/src/pl_exec.c                  | 32 ++++-----
 src/pl/plpython/plpy_plpymodule.c             |  9 ++-
 src/pl/plpython/plpy_procedure.c              |  9 +--
 src/pl/tcl/pltcl.c                            | 19 ++----
 src/timezone/pgtz.c                           | 12 +---
 72 files changed, 376 insertions(+), 847 deletions(-)

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 9798cb535bc..53a3a090d34 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -2548,13 +2548,9 @@ getConnectionByName(const char *name)
 static HTAB *
 createConnHash(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(remoteConnHashEnt);
-
-	return hash_create("Remote Con hash", NUMCONN, &ctl,
-					   HASH_ELEM | HASH_STRINGS);
+	return hash_make_cxt(remoteConnHashEnt, name,
+						 "Remote Con hash", NUMCONN,
+						 TopMemoryContext);
 }
 
 static remoteConn *
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 7975476b890..2af828a9991 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -518,7 +518,6 @@ static void
 pgss_shmem_startup(void)
 {
 	bool		found;
-	HASHCTL		info;
 	FILE	   *file = NULL;
 	FILE	   *qfile = NULL;
 	uint32		header;
@@ -558,12 +557,9 @@ pgss_shmem_startup(void)
 		pgss->stats.stats_reset = GetCurrentTimestamp();
 	}
 
-	info.keysize = sizeof(pgssHashKey);
-	info.entrysize = sizeof(pgssEntry);
-	pgss_hash = ShmemInitHash("pg_stat_statements hash",
-							  pgss_max, pgss_max,
-							  &info,
-							  HASH_ELEM | HASH_BLOBS);
+	pgss_hash = shmem_hash_make(pgssEntry, key,
+								"pg_stat_statements hash",
+								pgss_max, pgss_max);
 
 	LWLockRelease(AddinShmemInitLock);
 
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index b4eaeec6090..383ce6b31b1 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -896,7 +896,6 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result)
 static void
 transformGraph(TrgmNFA *trgmNFA)
 {
-	HASHCTL		hashCtl;
 	TrgmStateKey initkey;
 	TrgmState  *initstate;
 	ListCell   *lc;
@@ -908,13 +907,7 @@ transformGraph(TrgmNFA *trgmNFA)
 	trgmNFA->overflowed = false;
 
 	/* Create hashtable for states */
-	hashCtl.keysize = sizeof(TrgmStateKey);
-	hashCtl.entrysize = sizeof(TrgmState);
-	hashCtl.hcxt = CurrentMemoryContext;
-	trgmNFA->states = hash_create("Trigram NFA",
-								  1024,
-								  &hashCtl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	trgmNFA->states = hash_make(TrgmState, stateKey, "Trigram NFA", 1024);
 	trgmNFA->nstates = 0;
 
 	/* Create initial state: ambiguous prefix, NFA's initial state */
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 192f8011160..0faf7e69280 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -217,17 +217,13 @@ GetConnection(UserMapping *user, bool will_prep_stmt, PgFdwConnState **state)
 	/* First time through, initialize connection cache hashtable */
 	if (ConnectionHash == NULL)
 	{
-		HASHCTL		ctl;
-
 		if (pgfdw_we_get_result == 0)
 			pgfdw_we_get_result =
 				WaitEventExtensionNew("PostgresFdwGetResult");
 
-		ctl.keysize = sizeof(ConnCacheKey);
-		ctl.entrysize = sizeof(ConnCacheEntry);
-		ConnectionHash = hash_create("postgres_fdw connections", 8,
-									 &ctl,
-									 HASH_ELEM | HASH_BLOBS);
+		ConnectionHash = hash_make_cxt(ConnCacheEntry, key,
+									   "postgres_fdw connections", 8,
+									   TopMemoryContext);
 
 		/*
 		 * Register some callback functions that manage connection cleanup.
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index 250f54fea32..cba1c0967a9 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -28,6 +28,7 @@
 #include "postgres_fdw.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 
 /* Hash table for caching the results of shippability lookups */
@@ -91,13 +92,10 @@ InvalidateShippableCacheCallback(Datum arg, SysCacheIdentifier cacheid,
 static void
 InitializeShippableCache(void)
 {
-	HASHCTL		ctl;
-
 	/* Create the hash table. */
-	ctl.keysize = sizeof(ShippableCacheKey);
-	ctl.entrysize = sizeof(ShippableCacheEntry);
-	ShippableCacheHash =
-		hash_create("Shippability cache", 256, &ctl, HASH_ELEM | HASH_BLOBS);
+	ShippableCacheHash = hash_make_cxt(ShippableCacheEntry, key,
+									   "Shippability cache", 256,
+									   TopMemoryContext);
 
 	/* Set up invalidation callback on pg_foreign_server. */
 	CacheRegisterSyscacheCallback(FOREIGNSERVEROID,
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 31f70b7bc10..092fce069e0 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -707,24 +707,17 @@ static HTAB *
 load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
 {
 	HTAB	   *crosstab_hash;
-	HASHCTL		ctl;
 	int			ret;
 	uint64		proc;
 	MemoryContext SPIcontext;
 
-	/* initialize the category hash table */
-	ctl.keysize = MAX_CATNAME_LEN;
-	ctl.entrysize = sizeof(crosstab_HashEnt);
-	ctl.hcxt = per_query_ctx;
-
 	/*
-	 * use INIT_CATS, defined above as a guess of how many hash table entries
-	 * to create, initially
+	 * Initialize the category hash table. Use INIT_CATS, defined above as a
+	 * guess of how many hash table entries to create, initially.
 	 */
-	crosstab_hash = hash_create("crosstab hash",
-								INIT_CATS,
-								&ctl,
-								HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	crosstab_hash = hash_make_cxt(crosstab_HashEnt, internal_catname,
+								  "crosstab hash", INIT_CATS,
+								  per_query_ctx);
 
 	/* Connect to SPI manager */
 	SPI_connect();
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index f30346469ed..b72bdfde061 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -125,18 +125,9 @@ missing_match(const void *key1, const void *key2, Size keysize)
 static void
 init_missing_cache(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(missing_cache_key);
-	hash_ctl.entrysize = sizeof(missing_cache_key);
-	hash_ctl.hcxt = TopMemoryContext;
-	hash_ctl.hash = missing_hash;
-	hash_ctl.match = missing_match;
 	missing_cache =
-		hash_create("Missing Values Cache",
-					32,
-					&hash_ctl,
-					HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE);
+		hashset_make_fn_cxt(missing_cache_key, "Missing Values Cache", 32,
+							missing_hash, missing_match, TopMemoryContext);
 }
 
 /* ----------------------------------------------------------------
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 7f57c787f4c..09f11c459d7 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -1515,15 +1515,8 @@ typedef struct
 static void
 gistInitParentMap(GISTBuildState *buildstate)
 {
-	HASHCTL		hashCtl;
-
-	hashCtl.keysize = sizeof(BlockNumber);
-	hashCtl.entrysize = sizeof(ParentMapEntry);
-	hashCtl.hcxt = CurrentMemoryContext;
-	buildstate->parentMap = hash_create("gistbuild parent map",
-										1024,
-										&hashCtl,
-										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	buildstate->parentMap = hash_make(ParentMapEntry, childblkno,
+									  "gistbuild parent map", 1024);
 }
 
 static void
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 3213cf45aa6..029af8542f9 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -44,7 +44,6 @@ GISTBuildBuffers *
 gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 {
 	GISTBuildBuffers *gfbb;
-	HASHCTL		hashCtl;
 
 	gfbb = palloc_object(GISTBuildBuffers);
 	gfbb->pagesPerBuffer = pagesPerBuffer;
@@ -72,13 +71,8 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 	 * nodeBuffersTab hash is association between index blocks and it's
 	 * buffers.
 	 */
-	hashCtl.keysize = sizeof(BlockNumber);
-	hashCtl.entrysize = sizeof(GISTNodeBuffer);
-	hashCtl.hcxt = CurrentMemoryContext;
-	gfbb->nodeBuffersTab = hash_create("gistbuildbuffers",
-									   1024,
-									   &hashCtl,
-									   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	gfbb->nodeBuffersTab = hash_make(GISTNodeBuffer, nodeBlocknum,
+									 "gistbuildbuffers", 1024);
 
 	gfbb->bufferEmptyingQueue = NIL;
 
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 8099b0d021f..fd66c070ca7 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1360,7 +1360,6 @@ void
 _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 				   uint32 maxbucket, uint32 highmask, uint32 lowmask)
 {
-	HASHCTL		hash_ctl;
 	HTAB	   *tidhtab;
 	Buffer		bucket_nbuf = InvalidBuffer;
 	Buffer		nbuf;
@@ -1371,16 +1370,8 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 	Bucket		nbucket;
 	bool		found;
 
-	/* Initialize hash tables used to track TIDs */
-	hash_ctl.keysize = sizeof(ItemPointerData);
-	hash_ctl.entrysize = sizeof(ItemPointerData);
-	hash_ctl.hcxt = CurrentMemoryContext;
-
-	tidhtab =
-		hash_create("bucket ctids",
-					256,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	/* Initialize hash tables used to track TIDs (with arbitrary initial size) */
+	tidhtab = hashset_make(ItemPointerData, "bucket ctids", 256);
 
 	bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
 
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index f707b102c72..f58b4b2b205 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -238,7 +238,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	RewriteState state;
 	MemoryContext rw_cxt;
 	MemoryContext old_cxt;
-	HASHCTL		hash_ctl;
 
 	/*
 	 * To ease cleanup, make a separate context that will contain the
@@ -263,24 +262,19 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	state->rs_cxt = rw_cxt;
 	state->rs_bulkstate = smgr_bulk_start_rel(new_heap, MAIN_FORKNUM);
 
-	/* Initialize hash tables used to track update chains */
-	hash_ctl.keysize = sizeof(TidHashKey);
-	hash_ctl.entrysize = sizeof(UnresolvedTupData);
-	hash_ctl.hcxt = state->rs_cxt;
-
+	/*
+	 * Initialize hash tables used to track update chains (with arbitrary
+	 * initial sizes)
+	 */
 	state->rs_unresolved_tups =
-		hash_create("Rewrite / Unresolved ctids",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
-	hash_ctl.entrysize = sizeof(OldToNewMappingData);
+		hash_make_cxt(UnresolvedTupData, key,
+					  "Rewrite / Unresolved ctids", 128,
+					  state->rs_cxt);
 
 	state->rs_old_new_tid_map =
-		hash_create("Rewrite / Old to new tid map",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(OldToNewMappingData, key,
+					  "Rewrite / Old to new tid map", 128,
+					  state->rs_cxt);
 
 	MemoryContextSwitchTo(old_cxt);
 
@@ -761,7 +755,6 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 static void
 logical_begin_heap_rewrite(RewriteState state)
 {
-	HASHCTL		hash_ctl;
 	TransactionId logical_xmin;
 
 	/*
@@ -792,15 +785,11 @@ logical_begin_heap_rewrite(RewriteState state)
 	state->rs_begin_lsn = GetXLogInsertRecPtr();
 	state->rs_num_rewrite_mappings = 0;
 
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(RewriteMappingFile);
-	hash_ctl.hcxt = state->rs_cxt;
-
 	state->rs_logical_mappings =
-		hash_create("Logical rewrite mapping",
-					128,		/* arbitrary initial size */
-					&hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(RewriteMappingFile, xid,
+					  "Logical rewrite mapping",
+					  128,		/* arbitrary initial size */
+					  state->rs_cxt);
 }
 
 /*
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index c235eca7c51..df348d5701e 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -364,15 +364,13 @@ XLogPrefetcher *
 XLogPrefetcherAllocate(XLogReaderState *reader)
 {
 	XLogPrefetcher *prefetcher;
-	HASHCTL		ctl;
 
 	prefetcher = palloc0_object(XLogPrefetcher);
 	prefetcher->reader = reader;
 
-	ctl.keysize = sizeof(RelFileLocator);
-	ctl.entrysize = sizeof(XLogPrefetcherFilter);
-	prefetcher->filter_table = hash_create("XLogPrefetcherFilterTable", 1024,
-										   &ctl, HASH_ELEM | HASH_BLOBS);
+	prefetcher->filter_table = hash_make_cxt(XLogPrefetcherFilter, rlocator,
+											 "XLogPrefetcherFilterTable", 1024,
+											 TopMemoryContext);
 	dlist_init(&prefetcher->filter_queue);
 
 	SharedStats->wal_distance = 0;
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 5fbe39133b8..d11e42c9490 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -27,6 +27,7 @@
 #include "storage/fd.h"
 #include "storage/smgr.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 #include "utils/rel.h"
 
 
@@ -131,15 +132,9 @@ log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
 	if (invalid_page_tab == NULL)
 	{
 		/* create hash table when first needed */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(xl_invalid_page_key);
-		ctl.entrysize = sizeof(xl_invalid_page);
-
-		invalid_page_tab = hash_create("XLOG invalid-page table",
-									   100,
-									   &ctl,
-									   HASH_ELEM | HASH_BLOBS);
+		invalid_page_tab = hash_make_cxt(xl_invalid_page, key,
+										 "XLOG invalid-page table", 100,
+										 TopMemoryContext);
 	}
 
 	/* we currently assume xl_invalid_page_key contains no padding */
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 33a461484d4..86c8bada557 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -267,15 +267,9 @@ EnumValuesDelete(Oid enumTypeOid)
 static void
 init_uncommitted_enum_types(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(Oid);
-	hash_ctl.hcxt = TopTransactionContext;
-	uncommitted_enum_types = hash_create("Uncommitted enum types",
-										 32,
-										 &hash_ctl,
-										 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	uncommitted_enum_types = hashset_make_cxt(Oid,
+											  "Uncommitted enum types", 32,
+											  TopTransactionContext);
 }
 
 /*
@@ -284,15 +278,9 @@ init_uncommitted_enum_types(void)
 static void
 init_uncommitted_enum_values(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(Oid);
-	hash_ctl.hcxt = TopTransactionContext;
-	uncommitted_enum_values = hash_create("Uncommitted enum values",
-										  32,
-										  &hash_ctl,
-										  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	uncommitted_enum_values = hashset_make_cxt(Oid,
+											   "Uncommitted enum values", 32,
+											   TopTransactionContext);
 }
 
 /*
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index 4b9802aafcc..e36419786d5 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -257,19 +257,12 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
 	/* hash table for O(1) rel_oid -> rel_numparents cell lookup */
 	HTAB	   *seen_rels;
-	HASHCTL		ctl;
 	List	   *rels_list,
 			   *rel_numparents;
 	ListCell   *l;
 
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(SeenRelsEntry);
-	ctl.hcxt = CurrentMemoryContext;
-
-	seen_rels = hash_create("find_all_inheritors temporary table",
-							32, /* start small and extend */
-							&ctl,
-							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	seen_rels = hash_make(SeenRelsEntry, rel_id,
+						  "find_all_inheritors temporary table", 32);
 
 	/*
 	 * We build a list starting with the given rel and adding all direct and
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index e443a4993c5..db3e08319b5 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -90,15 +90,9 @@ AddPendingSync(const RelFileLocator *rlocator)
 
 	/* create the hash if not yet */
 	if (!pendingSyncHash)
-	{
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(RelFileLocator);
-		ctl.entrysize = sizeof(PendingRelSync);
-		ctl.hcxt = TopTransactionContext;
-		pendingSyncHash = hash_create("pending sync hash", 16, &ctl,
-									  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-	}
+		pendingSyncHash = hash_make_cxt(PendingRelSync, rlocator,
+										"pending sync hash", 16,
+										TopTransactionContext);
 
 	pending = hash_search(pendingSyncHash, rlocator, HASH_ENTER, &found);
 	Assert(!found);
@@ -600,7 +594,6 @@ void
 SerializePendingSyncs(Size maxSize, char *startAddress)
 {
 	HTAB	   *tmphash;
-	HASHCTL		ctl;
 	HASH_SEQ_STATUS scan;
 	PendingRelSync *sync;
 	PendingRelDelete *delete;
@@ -611,12 +604,8 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 		goto terminate;
 
 	/* Create temporary hash to collect active relfilelocators */
-	ctl.keysize = sizeof(RelFileLocator);
-	ctl.entrysize = sizeof(RelFileLocator);
-	ctl.hcxt = CurrentMemoryContext;
-	tmphash = hash_create("tmp relfilelocators",
-						  hash_get_num_entries(pendingSyncHash), &ctl,
-						  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	tmphash = hashset_make(RelFileLocator, "tmp relfilelocators",
+						   hash_get_num_entries(pendingSyncHash));
 
 	/* collect all rlocator from pending syncs */
 	hash_seq_init(&scan, pendingSyncHash);
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 5c9a56c3d40..0c21f528498 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -737,21 +737,15 @@ initGlobalChannelTable(void)
 static void
 initLocalChannelTable(void)
 {
-	HASHCTL		hash_ctl;
-
 	/* Quick exit if we already did this */
 	if (localChannelTable != NULL)
 		return;
 
 	/* Initialize local hash table for this backend's listened channels */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(ChannelName);
-
 	localChannelTable =
-		hash_create("Local Listen Channels",
-					64,
-					&hash_ctl,
-					HASH_ELEM | HASH_STRINGS);
+		hash_make_cxt(ChannelName, channel, "Local Listen Channels",
+					  64,
+					  TopMemoryContext);
 }
 
 /*
@@ -763,20 +757,13 @@ initLocalChannelTable(void)
 static void
 initPendingListenActions(void)
 {
-	HASHCTL		hash_ctl;
-
 	if (pendingListenActions != NULL)
 		return;
 
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(PendingListenEntry);
-	hash_ctl.hcxt = CurTransactionContext;
-
 	pendingListenActions =
-		hash_create("Pending Listen Actions",
-					list_length(pendingActions->actions),
-					&hash_ctl,
-					HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+		hash_make_cxt(PendingListenEntry, channel, "Pending Listen Actions",
+					  list_length(pendingActions->actions),
+					  CurTransactionContext);
 }
 
 /*
@@ -3171,31 +3158,21 @@ AddEventToPendingNotifies(Notification *n)
 	if (list_length(pendingNotifies->events) >= MIN_HASHABLE_NOTIFIES &&
 		pendingNotifies->hashtab == NULL)
 	{
-		HASHCTL		hash_ctl;
 		ListCell   *l;
 
 		/* Create the hash table */
-		hash_ctl.keysize = sizeof(Notification *);
-		hash_ctl.entrysize = sizeof(struct NotificationHash);
-		hash_ctl.hash = notification_hash;
-		hash_ctl.match = notification_match;
-		hash_ctl.hcxt = CurTransactionContext;
 		pendingNotifies->hashtab =
-			hash_create("Pending Notifies",
-						256L,
-						&hash_ctl,
-						HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+			hash_make_fn_cxt(struct NotificationHash, event,
+							 "Pending Notifies", 256,
+							 notification_hash, notification_match,
+							 CurTransactionContext);
 
 		/* Create the unique channel name table */
 		Assert(pendingNotifies->uniqueChannelHash == NULL);
-		hash_ctl.keysize = NAMEDATALEN;
-		hash_ctl.entrysize = sizeof(ChannelName);
-		hash_ctl.hcxt = CurTransactionContext;
 		pendingNotifies->uniqueChannelHash =
-			hash_create("Pending Notify Channel Names",
-						64L,
-						&hash_ctl,
-						HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+			hash_make_cxt(ChannelName, channel, "Pending Notify Channel Names",
+						  64L,
+						  CurTransactionContext);
 
 		/* Insert all the already-existing events */
 		foreach(l, pendingNotifies->events)
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 876aad2100a..0ab08cceb9c 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -373,15 +373,9 @@ EvaluateParams(ParseState *pstate, PreparedStatement *pstmt, List *params,
 static void
 InitQueryHashTable(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(PreparedStatement);
-
-	prepared_queries = hash_create("Prepared Queries",
-								   32,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_STRINGS);
+	prepared_queries = hash_make_cxt(PreparedStatement, stmt_name,
+									 "Prepared Queries", 32,
+									 TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 551667650ba..af7a29df1f6 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1113,13 +1113,9 @@ lock_and_open_sequence(SeqTable seq)
 static void
 create_seq_hashtable(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(SeqTableData);
-
-	seqhashtab = hash_create("Sequence values", 16, &ctl,
-							 HASH_ELEM | HASH_BLOBS);
+	seqhashtab = hash_make_cxt(SeqTableData, relid,
+							   "Sequence values", 16,
+							   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 0ce2e81f9c2..cb7e00ad03f 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2193,19 +2193,9 @@ ExecuteTruncateGuts(List *explicit_rels,
 
 			/* First time through, initialize hashtable for foreign tables */
 			if (!ft_htab)
-			{
-				HASHCTL		hctl;
-
-				memset(&hctl, 0, sizeof(HASHCTL));
-				hctl.keysize = sizeof(Oid);
-				hctl.entrysize = sizeof(ForeignTruncateInfo);
-				hctl.hcxt = CurrentMemoryContext;
-
-				ft_htab = hash_create("TRUNCATE for Foreign Tables",
-									  32,	/* start small and extend */
-									  &hctl,
-									  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-			}
+				ft_htab = hash_make(ForeignTruncateInfo, serverid,
+									"TRUNCATE for Foreign Tables",
+									32);	/* start small and extend */
 
 			/* Find or create cached entry for the foreign table */
 			ft_info = hash_search(ft_htab, &serverid, HASH_ENTER, &found);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index dfd7b33aa9b..aa93b4f3656 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -5700,15 +5700,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 #endif
 	if (nrels >= MT_NRELS_HASH)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(MTTargetRelLookup);
-		hash_ctl.hcxt = CurrentMemoryContext;
 		mtstate->mt_resultOidHash =
-			hash_create("ModifyTable target hash",
-						nrels, &hash_ctl,
-						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+			hash_make(MTTargetRelLookup, relationOid,
+					  "ModifyTable target hash", nrels);
 		for (i = 0; i < nrels; i++)
 		{
 			Oid			hashkey;
diff --git a/src/backend/nodes/extensible.c b/src/backend/nodes/extensible.c
index 0d43d66c1cd..bf0b94d3c72 100644
--- a/src/backend/nodes/extensible.c
+++ b/src/backend/nodes/extensible.c
@@ -22,6 +22,7 @@
 
 #include "nodes/extensible.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 static HTAB *extensible_node_methods = NULL;
 static HTAB *custom_scan_methods = NULL;
@@ -45,13 +46,8 @@ RegisterExtensibleNodeEntry(HTAB **p_htable, const char *htable_label,
 
 	if (*p_htable == NULL)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = EXTNODENAME_MAX_LEN;
-		ctl.entrysize = sizeof(ExtensibleNodeEntry);
-
-		*p_htable = hash_create(htable_label, 100, &ctl,
-								HASH_ELEM | HASH_STRINGS);
+		*p_htable = hash_make_cxt(ExtensibleNodeEntry, extnodename,
+								  htable_label, 100, TopMemoryContext);
 	}
 
 	if (strlen(extnodename) >= EXTNODENAME_MAX_LEN)
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 7c4be174869..80fc867da76 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -702,19 +702,10 @@ get_relation_notnullatts(PlannerInfo *root, Relation relation)
 	/* create the hash table if it hasn't been created yet */
 	if (root->glob->rel_notnullatts_hash == NULL)
 	{
-		HTAB	   *hashtab;
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(NotnullHashEntry);
-		hash_ctl.hcxt = CurrentMemoryContext;
-
-		hashtab = hash_create("Relation NOT NULL attnums",
-							  64L,	/* arbitrary initial size */
-							  &hash_ctl,
-							  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
-		root->glob->rel_notnullatts_hash = hashtab;
+		root->glob->rel_notnullatts_hash =
+			hash_make(NotnullHashEntry, relid,
+					  "Relation NOT NULL attnums",
+					  64L);		/* arbitrary initial size */
 	}
 
 	/*
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 690a23d619a..0cf77394abc 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -2119,12 +2119,9 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
 	if (OprProofCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(OprProofCacheKey);
-		ctl.entrysize = sizeof(OprProofCacheEntry);
-		OprProofCacheHash = hash_create("Btree proof lookup cache", 256,
-										&ctl, HASH_ELEM | HASH_BLOBS);
+		OprProofCacheHash = hash_make_cxt(OprProofCacheEntry, key,
+										  "Btree proof lookup cache", 256,
+										  TopMemoryContext);
 
 		/* Arrange to flush cache on pg_amop changes */
 		CacheRegisterSyscacheCallback(AMOPOPID,
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 91bcda34a37..c6864586962 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -616,19 +616,12 @@ static void
 build_join_rel_hash(PlannerInfo *root)
 {
 	HTAB	   *hashtab;
-	HASHCTL		hash_ctl;
 	ListCell   *l;
 
 	/* Create the hash table */
-	hash_ctl.keysize = sizeof(Relids);
-	hash_ctl.entrysize = sizeof(JoinHashEntry);
-	hash_ctl.hash = bitmap_hash;
-	hash_ctl.match = bitmap_match;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	hashtab = hash_create("JoinRelHashTable",
-						  256L,
-						  &hash_ctl,
-						  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	hashtab = hash_make_fn(JoinHashEntry, join_relids,
+						   "JoinRelHashTable", 256,
+						   bitmap_hash, bitmap_match);
 
 	/* Insert all the already-existing joinrels */
 	foreach(l, root->join_rel_list)
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 2f218c1ab8b..d1dd342d940 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -28,6 +28,7 @@
 #include "utils/hsearch.h"
 #include "utils/inval.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 #include "utils/typcache.h"
 
@@ -1030,12 +1031,9 @@ find_oper_cache_entry(OprCacheKey *key)
 	if (OprCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(OprCacheKey);
-		ctl.entrysize = sizeof(OprCacheEntry);
-		OprCacheHash = hash_create("Operator lookup cache", 256,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+		OprCacheHash = hash_make_cxt(OprCacheEntry, key,
+									 "Operator lookup cache", 256,
+									 TopMemoryContext);
 
 		/* Arrange to flush cache on pg_operator and pg_cast changes */
 		CacheRegisterSyscacheCallback(OPERNAMENSP,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index c3d275f8726..db26e4a82b6 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -424,17 +424,12 @@ CreatePartitionDirectory(MemoryContext mcxt, bool omit_detached)
 {
 	MemoryContext oldcontext = MemoryContextSwitchTo(mcxt);
 	PartitionDirectory pdir;
-	HASHCTL		ctl;
 
 	pdir = palloc_object(PartitionDirectoryData);
 	pdir->pdir_mcxt = mcxt;
 
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(PartitionDirectoryEntry);
-	ctl.hcxt = mcxt;
-
-	pdir->pdir_hash = hash_create("partition directory", 256, &ctl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	pdir->pdir_hash = hash_make_cxt(PartitionDirectoryEntry, reloid,
+									"partition directory", 256, mcxt);
 	pdir->omit_detached = omit_detached;
 
 	MemoryContextSwitchTo(oldcontext);
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 6694f485216..4cbb7ebf18c 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -934,7 +934,6 @@ rebuild_database_list(Oid newdb)
 	MemoryContext newcxt;
 	MemoryContext oldcxt;
 	MemoryContext tmpcxt;
-	HASHCTL		hctl;
 	int			score;
 	int			nelems;
 	HTAB	   *dbhash;
@@ -964,12 +963,10 @@ rebuild_database_list(Oid newdb)
 	 * score, and finally put the array elements into the new doubly linked
 	 * list.
 	 */
-	hctl.keysize = sizeof(Oid);
-	hctl.entrysize = sizeof(avl_dbase);
-	hctl.hcxt = tmpcxt;
-	dbhash = hash_create("autovacuum db hash", 20, &hctl,	/* magic number here
-															 * FIXME */
-						 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	dbhash = hash_make_cxt(avl_dbase, adl_datid,
+						   "autovacuum db hash",
+						   20,	/* magic number here FIXME */
+						   tmpcxt);
 
 	/* start by inserting the new database */
 	score = 0;
@@ -1924,7 +1921,6 @@ do_autovacuum(void)
 	Form_pg_database dbForm;
 	List	   *tables_to_process = NIL;
 	List	   *orphan_oids = NIL;
-	HASHCTL		ctl;
 	HTAB	   *table_toast_map;
 	ListCell   *volatile cell;
 	BufferAccessStrategy bstrategy;
@@ -1997,13 +1993,9 @@ do_autovacuum(void)
 	pg_class_desc = CreateTupleDescCopy(RelationGetDescr(classRel));
 
 	/* create hash table for toast <-> main relid mapping */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(av_relation);
-
-	table_toast_map = hash_create("TOAST to main relid map",
-								  100,
-								  &ctl,
-								  HASH_ELEM | HASH_BLOBS);
+	table_toast_map = hash_make_cxt(av_relation, ar_toastrelid,
+									"TOAST to main relid map", 100,
+									TopMemoryContext);
 
 	/*
 	 * Scan pg_class to determine which tables to vacuum.
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 3c982c6ffac..a4af47b0866 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -1309,7 +1309,6 @@ CompactCheckpointerRequestQueue(void)
 	int			num_requests;
 	int			read_idx,
 				write_idx;
-	HASHCTL		ctl;
 	HTAB	   *htab;
 	bool	   *skip_slot;
 
@@ -1329,14 +1328,9 @@ CompactCheckpointerRequestQueue(void)
 	head = CheckpointerShmem->head;
 
 	/* Initialize temporary hash table */
-	ctl.keysize = sizeof(CheckpointerRequest);
-	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
-	ctl.hcxt = CurrentMemoryContext;
-
-	htab = hash_create("CompactCheckpointerRequestQueue",
-					   CheckpointerShmem->num_requests,
-					   &ctl,
-					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	htab = hash_make(struct CheckpointerSlotMapping, request,
+					 "CompactCheckpointerRequestQueue",
+					 CheckpointerShmem->num_requests);
 
 	/*
 	 * The basic idea here is that a request can be skipped if it's followed
diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index d78693ffa8e..507757db7eb 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -487,16 +487,9 @@ pa_allocate_worker(TransactionId xid)
 	/* First time through, initialize parallel apply worker state hashtable. */
 	if (!ParallelApplyTxnHash)
 	{
-		HASHCTL		ctl;
-
-		MemSet(&ctl, 0, sizeof(ctl));
-		ctl.keysize = sizeof(TransactionId);
-		ctl.entrysize = sizeof(ParallelApplyWorkerEntry);
-		ctl.hcxt = ApplyContext;
-
-		ParallelApplyTxnHash = hash_create("logical replication parallel apply workers hash",
-										   16, &ctl,
-										   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		ParallelApplyTxnHash = hash_make_cxt(ParallelApplyWorkerEntry, xid,
+											 "logical replication parallel apply workers hash",
+											 16, ApplyContext);
 	}
 
 	/* Create an entry for the requested transaction. */
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index 0b1d80b5b0f..fcf295f1df1 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -105,8 +105,6 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 static void
 logicalrep_relmap_init(void)
 {
-	HASHCTL		ctl;
-
 	if (!LogicalRepRelMapContext)
 		LogicalRepRelMapContext =
 			AllocSetContextCreate(CacheMemoryContext,
@@ -114,12 +112,9 @@ logicalrep_relmap_init(void)
 								  ALLOCSET_DEFAULT_SIZES);
 
 	/* Initialize the relation hash table. */
-	ctl.keysize = sizeof(LogicalRepRelId);
-	ctl.entrysize = sizeof(LogicalRepRelMapEntry);
-	ctl.hcxt = LogicalRepRelMapContext;
-
-	LogicalRepRelMap = hash_create("logicalrep relation map cache", 128, &ctl,
-								   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	LogicalRepRelMap = hash_make_cxt(LogicalRepRelMapEntry, remoterel.remoteid,
+									 "logicalrep relation map cache", 128,
+									 LogicalRepRelMapContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
@@ -611,8 +606,6 @@ logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
 static void
 logicalrep_partmap_init(void)
 {
-	HASHCTL		ctl;
-
 	if (!LogicalRepPartMapContext)
 		LogicalRepPartMapContext =
 			AllocSetContextCreate(CacheMemoryContext,
@@ -620,12 +613,9 @@ logicalrep_partmap_init(void)
 								  ALLOCSET_DEFAULT_SIZES);
 
 	/* Initialize the relation hash table. */
-	ctl.keysize = sizeof(Oid);	/* partition OID */
-	ctl.entrysize = sizeof(LogicalRepPartMapEntry);
-	ctl.hcxt = LogicalRepPartMapContext;
-
-	LogicalRepPartMap = hash_create("logicalrep partition map cache", 64, &ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	LogicalRepPartMap = hash_make_cxt(LogicalRepPartMapEntry, partoid,
+									  "logicalrep partition map cache", 64,
+									  LogicalRepPartMapContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(logicalrep_partmap_invalidate_cb,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 682d13c9f22..9fdb5d4d152 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -325,7 +325,6 @@ ReorderBuffer *
 ReorderBufferAllocate(void)
 {
 	ReorderBuffer *buffer;
-	HASHCTL		hash_ctl;
 	MemoryContext new_ctx;
 
 	Assert(MyReplicationSlot != NULL);
@@ -338,8 +337,6 @@ ReorderBufferAllocate(void)
 	buffer =
 		(ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer));
 
-	memset(&hash_ctl, 0, sizeof(hash_ctl));
-
 	buffer->context = new_ctx;
 
 	buffer->change_context = SlabContextCreate(new_ctx,
@@ -368,12 +365,8 @@ ReorderBufferAllocate(void)
 												  SLAB_DEFAULT_BLOCK_SIZE,
 												  SLAB_DEFAULT_BLOCK_SIZE);
 
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
-	hash_ctl.hcxt = buffer->context;
-
-	buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl,
-								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	buffer->by_txn = hash_make_cxt(ReorderBufferTXNByIdEnt, xid,
+								   "ReorderBufferByXid", 1000, buffer->context);
 
 	buffer->by_txn_last_xid = InvalidTransactionId;
 	buffer->by_txn_last_txn = NULL;
@@ -1837,22 +1830,17 @@ static void
 ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
 	dlist_iter	iter;
-	HASHCTL		hash_ctl;
 
 	if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
 		return;
 
-	hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
-	hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
-	hash_ctl.hcxt = rb->context;
-
 	/*
 	 * create the hash with the exact number of to-be-stored tuplecids from
 	 * the start
 	 */
 	txn->tuplecid_hash =
-		hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(ReorderBufferTupleCidEnt, key,
+					  "ReorderBufferTupleCid", txn->ntuplecids, rb->context);
 
 	dlist_foreach(iter, &txn->tuplecids)
 	{
@@ -4972,15 +4960,10 @@ StartupReorderBuffer(void)
 static void
 ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
-	HASHCTL		hash_ctl;
-
 	Assert(txn->toast_hash == NULL);
 
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
-	hash_ctl.hcxt = rb->context;
-	txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl,
-								  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	txn->toast_hash = hash_make_cxt(ReorderBufferToastEnt, chunk_id,
+									"ReorderBufferToastHash", 5, rb->context);
 }
 
 /*
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index f49a4852ecb..a83e509f200 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -118,6 +118,7 @@
 #include "utils/array.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/rls.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
@@ -390,12 +391,9 @@ ProcessSyncingTablesForApply(XLogRecPtr current_lsn)
 	 */
 	if (table_states_not_ready != NIL && !last_start_times)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
-		last_start_times = hash_create("Logical replication table sync worker start times",
-									   256, &ctl, HASH_ELEM | HASH_BLOBS);
+		last_start_times = hash_make_cxt(struct tablesync_start_time_mapping, relid,
+										 "Logical replication table sync worker start times",
+										 256, TopMemoryContext);
 	}
 
 	/*
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 4ecfcbff7ab..6be2ae090cf 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -1974,7 +1974,6 @@ pgoutput_stream_prepare_txn(LogicalDecodingContext *ctx,
 static void
 init_rel_sync_cache(MemoryContext cachectx)
 {
-	HASHCTL		ctl;
 	static bool relation_callbacks_registered = false;
 
 	/* Nothing to do if hash table already exists */
@@ -1982,13 +1981,9 @@ init_rel_sync_cache(MemoryContext cachectx)
 		return;
 
 	/* Make a new hash table for the cache */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RelationSyncEntry);
-	ctl.hcxt = cachectx;
-
-	RelationSyncCache = hash_create("logical replication output relation cache",
-									128, &ctl,
-									HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
+	RelationSyncCache = hash_make_cxt(RelationSyncEntry, relid,
+									  "logical replication output relation cache",
+									  128, cachectx);
 
 	Assert(RelationSyncCache != NULL);
 
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 23d85fd32e2..95653944a9d 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -50,19 +50,16 @@ BufTableShmemSize(int size)
 void
 InitBufTable(int size)
 {
-	HASHCTL		info;
+	HASHOPTS	opts = {0};
 
 	/* assume no locking is needed yet */
 
 	/* BufferTag maps to Buffer */
-	info.keysize = sizeof(BufferTag);
-	info.entrysize = sizeof(BufferLookupEnt);
-	info.num_partitions = NUM_BUFFER_PARTITIONS;
-
-	SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
-								  size, size,
-								  &info,
-								  HASH_ELEM | HASH_BLOBS | HASH_PARTITION | HASH_FIXED_SIZE);
+	opts.num_partitions = NUM_BUFFER_PARTITIONS;
+	opts.fixed_size = true;
+	SharedBufHash = shmem_hash_make_ext(BufferLookupEnt, key,
+										"Shared Buffer Lookup Table",
+										size, size, &opts);
 }
 
 /*
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 396da84b25c..6f401e9656e 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -744,7 +744,6 @@ static void
 InitLocalBuffers(void)
 {
 	int			nbufs = num_temp_buffers;
-	HASHCTL		info;
 	int			i;
 
 	/*
@@ -795,13 +794,9 @@ InitLocalBuffers(void)
 	}
 
 	/* Create the lookup hash table */
-	info.keysize = sizeof(BufferTag);
-	info.entrysize = sizeof(LocalBufferLookupEnt);
-
-	LocalBufHash = hash_create("Local Buffer Lookup Table",
-							   nbufs,
-							   &info,
-							   HASH_ELEM | HASH_BLOBS);
+	LocalBufHash = hash_make_cxt(LocalBufferLookupEnt, key,
+								 "Local Buffer Lookup Table", nbufs,
+								 TopMemoryContext);
 
 	if (!LocalBufHash)
 		elog(ERROR, "could not initialize local buffer hash table");
diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c
index 25fa2151309..26e810e6b50 100644
--- a/src/backend/storage/file/reinit.c
+++ b/src/backend/storage/file/reinit.c
@@ -175,7 +175,6 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 	if ((op & UNLOGGED_RELATION_CLEANUP) != 0)
 	{
 		HTAB	   *hash;
-		HASHCTL		ctl;
 
 		/*
 		 * It's possible that someone could create a ton of unlogged relations
@@ -184,11 +183,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 		 * need to be reset.  Otherwise, this cleanup operation would be
 		 * O(n^2).
 		 */
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(unlogged_relation_entry);
-		ctl.hcxt = CurrentMemoryContext;
-		hash = hash_create("unlogged relation OIDs", 32, &ctl,
-						   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash = hash_make(unlogged_relation_entry, relnumber,
+						 "unlogged relation OIDs", 32);
 
 		/* Scan the directory. */
 		dbspace_dir = AllocateDir(dbspacedirname);
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index a5b7360e2dc..65128b0f508 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -179,8 +179,8 @@ InitShmemAllocator(PGShmemHeader *seghdr)
 	 * Create (or attach to) the shared memory index of shmem areas.
 	 *
 	 * This is the same initialization as ShmemInitHash() does, but we cannot
-	 * use ShmemInitHash() here because it relies on ShmemIndex being already
-	 * initialized.
+	 * use ShmemInitHash() or shmem_hash_make() here because they rely on
+	 * ShmemIndex being already initialized.
 	 */
 	info.keysize = SHMEM_INDEX_KEYSIZE;
 	info.entrysize = sizeof(ShmemIndexEnt);
@@ -388,6 +388,28 @@ ShmemInitHash(const char *name,		/* table string name for shmem index */
 	return hash_create(name, init_size, infoP, hash_flags);
 }
 
+/*
+ * Implementation function for shmem_hash_make macros.
+ *
+ * Creates a shared memory hash table with simplified parameters.
+ * Pass NULL for opts to use all defaults.
+ */
+HTAB *
+shmem_hash_make_impl(const char *name, int64 init_size, int64 max_size,
+					 Size keysize, Size entrysize, bool string_key,
+					 const HASHOPTS *opts)
+{
+	HASHCTL		ctl;
+	int			flags;
+
+	/* Shared memory hash tables use ShmemAllocNoError, not a custom allocator */
+	Assert(opts == NULL || opts->alloc == NULL);
+
+	hash_opts_init(&ctl, &flags, keysize, entrysize, string_key, opts);
+
+	return ShmemInitHash(name, init_size, max_size, &ctl, flags);
+}
+
 /*
  * ShmemInitStruct -- Create/attach to a structure in shared memory.
  *
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index de9092fdf5b..65264999c8a 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -32,6 +32,7 @@
 #include "storage/standby.h"
 #include "utils/hsearch.h"
 #include "utils/injection_point.h"
+#include "utils/memutils.h"
 #include "utils/ps_status.h"
 #include "utils/timeout.h"
 #include "utils/timestamp.h"
@@ -96,7 +97,6 @@ void
 InitRecoveryTransactionEnvironment(void)
 {
 	VirtualTransactionId vxid;
-	HASHCTL		hash_ctl;
 
 	Assert(RecoveryLockHash == NULL);	/* don't run this twice */
 
@@ -104,18 +104,12 @@ InitRecoveryTransactionEnvironment(void)
 	 * Initialize the hash tables for tracking the locks held by each
 	 * transaction.
 	 */
-	hash_ctl.keysize = sizeof(xl_standby_lock);
-	hash_ctl.entrysize = sizeof(RecoveryLockEntry);
-	RecoveryLockHash = hash_create("RecoveryLockHash",
-								   64,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_BLOBS);
-	hash_ctl.keysize = sizeof(TransactionId);
-	hash_ctl.entrysize = sizeof(RecoveryLockXidEntry);
-	RecoveryLockXidHash = hash_create("RecoveryLockXidHash",
-									  64,
-									  &hash_ctl,
-									  HASH_ELEM | HASH_BLOBS);
+	RecoveryLockHash = hash_make_cxt(RecoveryLockEntry, key,
+									 "RecoveryLockHash", 64,
+									 TopMemoryContext);
+	RecoveryLockXidHash = hash_make_cxt(RecoveryLockXidEntry, xid,
+										"RecoveryLockXidHash", 64,
+										TopMemoryContext);
 
 	/*
 	 * Initialize shared invalidation management for Startup process, being
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 234643e4dd7..1d5ea7c051c 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -444,7 +444,7 @@ static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
 void
 LockManagerShmemInit(void)
 {
-	HASHCTL		info;
+	HASHOPTS	opts;
 	int64		init_table_size,
 				max_table_size;
 	bool		found;
@@ -460,15 +460,11 @@ LockManagerShmemInit(void)
 	 * Allocate hash table for LOCK structs.  This stores per-locked-object
 	 * information.
 	 */
-	info.keysize = sizeof(LOCKTAG);
-	info.entrysize = sizeof(LOCK);
-	info.num_partitions = NUM_LOCK_PARTITIONS;
-
-	LockMethodLockHash = ShmemInitHash("LOCK hash",
-									   init_table_size,
-									   max_table_size,
-									   &info,
-									   HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.num_partitions = NUM_LOCK_PARTITIONS;
+	LockMethodLockHash = shmem_hash_make_ext(LOCK, tag, "LOCK hash",
+											 init_table_size, max_table_size,
+											 &opts);
 
 	/* Assume an average of 2 holders per lock */
 	max_table_size *= 2;
@@ -478,16 +474,12 @@ LockManagerShmemInit(void)
 	 * Allocate hash table for PROCLOCK structs.  This stores
 	 * per-lock-per-holder information.
 	 */
-	info.keysize = sizeof(PROCLOCKTAG);
-	info.entrysize = sizeof(PROCLOCK);
-	info.hash = proclock_hash;
-	info.num_partitions = NUM_LOCK_PARTITIONS;
-
-	LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
-										   init_table_size,
-										   max_table_size,
-										   &info,
-										   HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.hash = proclock_hash;
+	opts.num_partitions = NUM_LOCK_PARTITIONS;
+	LockMethodProcLockHash = shmem_hash_make_ext(PROCLOCK, tag, "PROCLOCK hash",
+												 init_table_size, max_table_size,
+												 &opts);
 
 	/*
 	 * Allocate fast-path structures.
@@ -509,15 +501,9 @@ InitLockManagerAccess(void)
 	 * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
 	 * counts and resource owner information.
 	 */
-	HASHCTL		info;
-
-	info.keysize = sizeof(LOCALLOCKTAG);
-	info.entrysize = sizeof(LOCALLOCK);
-
-	LockMethodLocalHash = hash_create("LOCALLOCK hash",
-									  16,
-									  &info,
-									  HASH_ELEM | HASH_BLOBS);
+	LockMethodLocalHash = hash_make_cxt(LOCALLOCK, tag,
+										"LOCALLOCK hash", 16,
+										TopMemoryContext);
 }
 
 
@@ -3406,20 +3392,13 @@ CheckForSessionAndXactLocks(void)
 		bool		xactLock;	/* is any lockmode held at xact level? */
 	} PerLockTagEntry;
 
-	HASHCTL		hash_ctl;
 	HTAB	   *lockhtab;
 	HASH_SEQ_STATUS status;
 	LOCALLOCK  *locallock;
 
 	/* Create a local hash table keyed by LOCKTAG only */
-	hash_ctl.keysize = sizeof(LOCKTAG);
-	hash_ctl.entrysize = sizeof(PerLockTagEntry);
-	hash_ctl.hcxt = CurrentMemoryContext;
-
-	lockhtab = hash_create("CheckForSessionAndXactLocks table",
-						   256, /* arbitrary initial size */
-						   &hash_ctl,
-						   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	lockhtab = hash_make(PerLockTagEntry, lock,
+						 "CheckForSessionAndXactLocks table", 256);
 
 	/* Scan local lock table to find entries for each LOCKTAG */
 	hash_seq_init(&status, LockMethodLocalHash);
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5cb696490d6..8fea811ecbe 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -294,7 +294,6 @@ static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);
 static void
 init_lwlock_stats(void)
 {
-	HASHCTL		ctl;
 	static MemoryContext lwlock_stats_cxt = NULL;
 	static bool exit_registered = false;
 
@@ -314,11 +313,8 @@ init_lwlock_stats(void)
 											 ALLOCSET_DEFAULT_SIZES);
 	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
 
-	ctl.keysize = sizeof(lwlock_stats_key);
-	ctl.entrysize = sizeof(lwlock_stats);
-	ctl.hcxt = lwlock_stats_cxt;
-	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	lwlock_stats_htab = hash_make_cxt(lwlock_stats, key,
+									  "lwlock stats", 16384, lwlock_stats_cxt);
 	if (!exit_registered)
 	{
 		on_shmem_exit(print_lwlock_stats, 0);
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index ae0e96aee5f..447b1e39804 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -212,6 +212,7 @@
 #include "storage/proc.h"
 #include "storage/procarray.h"
 #include "utils/guc_hooks.h"
+#include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/snapmgr.h"
 #include "utils/wait_event.h"
@@ -1155,7 +1156,7 @@ CheckPointPredicate(void)
 void
 PredicateLockShmemInit(void)
 {
-	HASHCTL		info;
+	HASHOPTS	opts;
 	int64		max_predicate_lock_targets;
 	int64		max_predicate_locks;
 	int64		max_serializable_xacts;
@@ -1177,16 +1178,13 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for PREDICATELOCKTARGET structs.  This stores
 	 * per-predicate-lock-target information.
 	 */
-	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
-	info.entrysize = sizeof(PREDICATELOCKTARGET);
-	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-
-	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
-											max_predicate_lock_targets,
-											max_predicate_lock_targets,
-											&info,
-											HASH_ELEM | HASH_BLOBS |
-											HASH_PARTITION | HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
+	opts.fixed_size = true;
+	PredicateLockTargetHash = shmem_hash_make_ext(PREDICATELOCKTARGET, tag,
+												  "PREDICATELOCKTARGET hash",
+												  max_predicate_lock_targets, max_predicate_lock_targets,
+												  &opts);
 
 	/*
 	 * Reserve a dummy entry in the hash table; we use it to make sure there's
@@ -1209,20 +1207,17 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for PREDICATELOCK structs.  This stores per
 	 * xact-lock-of-a-target information.
 	 */
-	info.keysize = sizeof(PREDICATELOCKTAG);
-	info.entrysize = sizeof(PREDICATELOCK);
-	info.hash = predicatelock_hash;
-	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-
 	/* Assume an average of 2 xacts per target */
 	max_predicate_locks = max_predicate_lock_targets * 2;
 
-	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
-									  max_predicate_locks,
-									  max_predicate_locks,
-									  &info,
-									  HASH_ELEM | HASH_FUNCTION |
-									  HASH_PARTITION | HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.hash = predicatelock_hash;
+	opts.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
+	opts.fixed_size = true;
+	PredicateLockHash = shmem_hash_make_ext(PREDICATELOCK, tag,
+											"PREDICATELOCK hash",
+											max_predicate_locks, max_predicate_locks,
+											&opts);
 
 	/*
 	 * Compute size for serializable transaction hashtable. Note these
@@ -1293,15 +1288,12 @@ PredicateLockShmemInit(void)
 	 * Allocate hash table for SERIALIZABLEXID structs.  This stores per-xid
 	 * information for serializable transactions which have accessed data.
 	 */
-	info.keysize = sizeof(SERIALIZABLEXIDTAG);
-	info.entrysize = sizeof(SERIALIZABLEXID);
-
-	SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
-										max_serializable_xacts,
-										max_serializable_xacts,
-										&info,
-										HASH_ELEM | HASH_BLOBS |
-										HASH_FIXED_SIZE);
+	MemSet(&opts, 0, sizeof(opts));
+	opts.fixed_size = true;
+	SerializableXidHash = shmem_hash_make_ext(SERIALIZABLEXID, tag,
+											  "SERIALIZABLEXID hash",
+											  max_serializable_xacts, max_serializable_xacts,
+											  &opts);
 
 	/*
 	 * Allocate space for tracking rw-conflicts in lists attached to the
@@ -1950,16 +1942,12 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
 static void
 CreateLocalPredicateLockHash(void)
 {
-	HASHCTL		hash_ctl;
-
 	/* Initialize the backend-local hash table of parent locks */
 	Assert(LocalPredicateLockHash == NULL);
-	hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
-	hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
-	LocalPredicateLockHash = hash_create("Local predicate lock",
-										 max_predicate_locks_per_xact,
-										 &hash_ctl,
-										 HASH_ELEM | HASH_BLOBS);
+	LocalPredicateLockHash = hash_make_cxt(LOCALPREDICATELOCK, tag,
+										   "Local predicate lock",
+										   max_predicate_locks_per_xact,
+										   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 5391640d861..e48b383b466 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -73,6 +73,7 @@
 #include "storage/smgr.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 
 
 /*
@@ -250,12 +251,9 @@ smgropen(RelFileLocator rlocator, ProcNumber backend)
 	if (SMgrRelationHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(RelFileLocatorBackend);
-		ctl.entrysize = sizeof(SMgrRelationData);
-		SMgrRelationHash = hash_create("smgr relation table", 400,
-									   &ctl, HASH_ELEM | HASH_BLOBS);
+		SMgrRelationHash = hash_make_cxt(SMgrRelationData, smgr_rlocator,
+										 "smgr relation table", 400,
+										 TopMemoryContext);
 		dlist_init(&unpinned_relns);
 	}
 
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 2c964b6f3d9..b8f3136e31d 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -131,8 +131,6 @@ InitSync(void)
 	 */
 	if (!IsUnderPostmaster || AmCheckpointerProcess())
 	{
-		HASHCTL		hash_ctl;
-
 		/*
 		 * XXX: The checkpointer needs to add entries to the pending ops table
 		 * when absorbing fsync requests.  That is done within a critical
@@ -147,13 +145,8 @@ InitSync(void)
 											  ALLOCSET_DEFAULT_SIZES);
 		MemoryContextAllowInCriticalSection(pendingOpsCxt, true);
 
-		hash_ctl.keysize = sizeof(FileTag);
-		hash_ctl.entrysize = sizeof(PendingFsyncEntry);
-		hash_ctl.hcxt = pendingOpsCxt;
-		pendingOps = hash_create("Pending Ops Table",
-								 100L,
-								 &hash_ctl,
-								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		pendingOps = hash_make_cxt(PendingFsyncEntry, tag,
+								   "Pending Ops Table", 100L, pendingOpsCxt);
 		pendingUnlinks = NIL;
 	}
 }
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 48ee050e37f..49ec4ab4db5 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *lexemes_tab;
-	HASHCTL		hash_ctl;
 	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
@@ -180,15 +179,9 @@ compute_tsvector_stats(VacAttrStats *stats,
 	 * worry about overflowing the initial size. Also we don't need to pay any
 	 * attention to locking and memory management.
 	 */
-	hash_ctl.keysize = sizeof(LexemeHashKey);
-	hash_ctl.entrysize = sizeof(TrackItem);
-	hash_ctl.hash = lexeme_hash;
-	hash_ctl.match = lexeme_match;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	lexemes_tab = hash_create("Analyzed lexemes table",
-							  num_mcelem,
-							  &hash_ctl,
-							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	lexemes_tab = hash_make_fn(TrackItem, key,
+							   "Analyzed lexemes table", num_mcelem,
+							   lexeme_hash, lexeme_match);
 
 	/* Initialize counters. */
 	b_current = 1;
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index e5a2289f0b0..3cf2d029d0c 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -120,7 +120,6 @@ void
 WaitEventCustomShmemInit(void)
 {
 	bool		found;
-	HASHCTL		info;
 
 	WaitEventCustomCounter = (WaitEventCustomCounterData *)
 		ShmemInitStruct("WaitEventCustomCounterData",
@@ -134,24 +133,18 @@ WaitEventCustomShmemInit(void)
 	}
 
 	/* initialize or attach the hash tables to store custom wait events */
-	info.keysize = sizeof(uint32);
-	info.entrysize = sizeof(WaitEventCustomEntryByInfo);
 	WaitEventCustomHashByInfo =
-		ShmemInitHash("WaitEventCustom hash by wait event information",
-					  WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
-					  WAIT_EVENT_CUSTOM_HASH_MAX_SIZE,
-					  &info,
-					  HASH_ELEM | HASH_BLOBS);
+		shmem_hash_make(WaitEventCustomEntryByInfo, wait_event_info,
+						"WaitEventCustom hash by wait event information",
+						WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
+						WAIT_EVENT_CUSTOM_HASH_MAX_SIZE);
 
 	/* key is a NULL-terminated string */
-	info.keysize = sizeof(char[NAMEDATALEN]);
-	info.entrysize = sizeof(WaitEventCustomEntryByName);
 	WaitEventCustomHashByName =
-		ShmemInitHash("WaitEventCustom hash by name",
-					  WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
-					  WAIT_EVENT_CUSTOM_HASH_MAX_SIZE,
-					  &info,
-					  HASH_ELEM | HASH_STRINGS);
+		shmem_hash_make(WaitEventCustomEntryByName, wait_event_name,
+						"WaitEventCustom hash by name",
+						WAIT_EVENT_CUSTOM_HASH_INIT_SIZE,
+						WAIT_EVENT_CUSTOM_HASH_MAX_SIZE);
 }
 
 /*
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 7bb000ddbd3..bdc7e2237f6 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *elements_tab;
-	HASHCTL		elem_hash_ctl;
 	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
@@ -236,7 +235,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	TrackItem  *item;
 	int			slot_idx;
 	HTAB	   *count_tab;
-	HASHCTL		count_hash_ctl;
 	DECountItem *count_item;
 
 	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
@@ -276,24 +274,13 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	 * worry about overflowing the initial size. Also we don't need to pay any
 	 * attention to locking and memory management.
 	 */
-	elem_hash_ctl.keysize = sizeof(Datum);
-	elem_hash_ctl.entrysize = sizeof(TrackItem);
-	elem_hash_ctl.hash = element_hash;
-	elem_hash_ctl.match = element_match;
-	elem_hash_ctl.hcxt = CurrentMemoryContext;
-	elements_tab = hash_create("Analyzed elements table",
-							   num_mcelem,
-							   &elem_hash_ctl,
-							   HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	elements_tab = hash_make_fn(TrackItem, key,
+								"Analyzed elements table", num_mcelem,
+								element_hash, element_match);
 
 	/* hashtable for array distinct elements counts */
-	count_hash_ctl.keysize = sizeof(int);
-	count_hash_ctl.entrysize = sizeof(DECountItem);
-	count_hash_ctl.hcxt = CurrentMemoryContext;
-	count_tab = hash_create("Array distinct element count table",
-							64,
-							&count_hash_ctl,
-							HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	count_tab = hash_make(DECountItem, count,
+						  "Array distinct element count table", 64);
 
 	/* Initialize counters. */
 	b_current = 1;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 0fee1b40d63..d6c018ca519 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -901,19 +901,8 @@ json_unique_hash_match(const void *key1, const void *key2, Size keysize)
 static void
 json_unique_check_init(JsonUniqueCheckState *cxt)
 {
-	HASHCTL		ctl;
-
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.keysize = sizeof(JsonUniqueHashEntry);
-	ctl.entrysize = sizeof(JsonUniqueHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	ctl.hash = json_unique_hash;
-	ctl.match = json_unique_hash_match;
-
-	*cxt = hash_create("json object hashtable",
-					   32,
-					   &ctl,
-					   HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION | HASH_COMPARE);
+	*cxt = hashset_make_fn(JsonUniqueHashEntry, "json object hashtable", 32,
+						   json_unique_hash, json_unique_hash_match);
 }
 
 static void
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 97cc3d60340..fc2e533a054 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -3811,18 +3811,12 @@ static HTAB *
 get_json_object_as_hash(const char *json, int len, const char *funcname,
 						Node *escontext)
 {
-	HASHCTL		ctl;
 	HTAB	   *tab;
 	JHashState *state;
 	JsonSemAction *sem;
 
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(JsonHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	tab = hash_create("json object hashtable",
-					  100,
-					  &ctl,
-					  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	tab = hash_make(JsonHashEntry, fname,
+					"json object hashtable", 100);
 
 	state = palloc0_object(JHashState);
 	sem = palloc0_object(JsonSemAction);
@@ -4216,7 +4210,6 @@ populate_recordset_object_start(void *state)
 {
 	PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
 	int			lex_level = _state->lex->lex_level;
-	HASHCTL		ctl;
 
 	/* Reject object at top level: we must have an array at level 0 */
 	if (lex_level == 0)
@@ -4230,13 +4223,8 @@ populate_recordset_object_start(void *state)
 		return JSON_SUCCESS;
 
 	/* Object at level 1: set up a new hash table for this object */
-	ctl.keysize = NAMEDATALEN;
-	ctl.entrysize = sizeof(JsonHashEntry);
-	ctl.hcxt = CurrentMemoryContext;
-	_state->json_hash = hash_create("json object hashtable",
-									100,
-									&ctl,
-									HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	_state->json_hash = hash_make(JsonHashEntry, fname,
+								  "json object hashtable", 100);
 
 	return JSON_SUCCESS;
 }
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 1a4dbbeb8db..4d0c811bdd0 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -188,17 +188,10 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
 	int			context_id;
 	List	   *contexts;
-	HASHCTL		ctl;
 	HTAB	   *context_id_lookup;
 
-	ctl.keysize = sizeof(MemoryContext);
-	ctl.entrysize = sizeof(MemoryContextId);
-	ctl.hcxt = CurrentMemoryContext;
-
-	context_id_lookup = hash_create("pg_get_backend_memory_contexts",
-									256,
-									&ctl,
-									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	context_id_lookup = hash_make(MemoryContextId, context,
+								  "pg_get_backend_memory_contexts", 256);
 
 	InitMaterializedSRF(fcinfo, 0);
 
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 2de08da6539..243fb2c4197 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -3307,30 +3307,25 @@ ri_NullCheck(TupleDesc tupDesc,
 static void
 ri_InitHashTables(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RI_ConstraintInfo);
-	ri_constraint_cache = hash_create("RI constraint cache",
-									  RI_INIT_CONSTRAINTHASHSIZE,
-									  &ctl, HASH_ELEM | HASH_BLOBS);
+	ri_constraint_cache = hash_make_cxt(RI_ConstraintInfo, constraint_id,
+										"RI constraint cache",
+										RI_INIT_CONSTRAINTHASHSIZE,
+										TopMemoryContext);
 
 	/* Arrange to flush cache on pg_constraint changes */
 	CacheRegisterSyscacheCallback(CONSTROID,
 								  InvalidateConstraintCacheCallBack,
 								  (Datum) 0);
 
-	ctl.keysize = sizeof(RI_QueryKey);
-	ctl.entrysize = sizeof(RI_QueryHashEntry);
-	ri_query_cache = hash_create("RI query cache",
-								 RI_INIT_QUERYHASHSIZE,
-								 &ctl, HASH_ELEM | HASH_BLOBS);
-
-	ctl.keysize = sizeof(RI_CompareKey);
-	ctl.entrysize = sizeof(RI_CompareHashEntry);
-	ri_compare_cache = hash_create("RI compare cache",
+	ri_query_cache = hash_make_cxt(RI_QueryHashEntry, key,
+								   "RI query cache",
 								   RI_INIT_QUERYHASHSIZE,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+								   TopMemoryContext);
+
+	ri_compare_cache = hash_make_cxt(RI_CompareHashEntry, key,
+									 "RI compare cache",
+									 RI_INIT_QUERYHASHSIZE,
+									 TopMemoryContext);
 }
 
 
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index aec5556b008..f22d1bb7ce7 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -4246,7 +4246,6 @@ static void
 set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
 				 Bitmapset *rels_used)
 {
-	HASHCTL		hash_ctl;
 	HTAB	   *names_hash;
 	NameHashEntry *hentry;
 	bool		found;
@@ -4262,13 +4261,9 @@ set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
 	 * We use a hash table to hold known names, so that this process is O(N)
 	 * not O(N^2) for N names.
 	 */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = sizeof(NameHashEntry);
-	hash_ctl.hcxt = CurrentMemoryContext;
-	names_hash = hash_create("set_rtable_names names",
-							 list_length(dpns->rtable),
-							 &hash_ctl,
-							 HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	names_hash = hash_make(NameHashEntry, name,
+						   "set_rtable_names names",
+						   list_length(dpns->rtable));
 
 	/* Preload the hash table with names appearing in parent_namespaces */
 	foreach(lc, parent_namespaces)
@@ -5339,7 +5334,6 @@ expand_colnames_array_to(deparse_columns *colinfo, int n)
 static void
 build_colinfo_names_hash(deparse_columns *colinfo)
 {
-	HASHCTL		hash_ctl;
 	int			i;
 	ListCell   *lc;
 
@@ -5355,13 +5349,10 @@ build_colinfo_names_hash(deparse_columns *colinfo)
 	 * Set up the hash table.  The entries are just strings with no other
 	 * payload.
 	 */
-	hash_ctl.keysize = NAMEDATALEN;
-	hash_ctl.entrysize = NAMEDATALEN;
-	hash_ctl.hcxt = CurrentMemoryContext;
-	colinfo->names_hash = hash_create("deparse_columns names",
-									  colinfo->num_cols + colinfo->num_new_cols,
-									  &hash_ctl,
-									  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+	colinfo->names_hash =
+		hashset_make_cxt(NameData, "deparse_columns names",
+						 colinfo->num_cols + colinfo->num_new_cols,
+						 CurrentMemoryContext);
 
 	/*
 	 * Preload the hash table with any names already present (these would have
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 9244a23013e..41712180b1c 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -21,6 +21,7 @@
 #include "utils/catcache.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 #include "varatt.h"
 
@@ -97,22 +98,15 @@ relatt_cache_syshash(const void *key, Size keysize)
 static void
 InitializeAttoptCache(void)
 {
-	HASHCTL		ctl;
-
-	/* Initialize the hash table. */
-	ctl.keysize = sizeof(AttoptCacheKey);
-	ctl.entrysize = sizeof(AttoptCacheEntry);
-
 	/*
 	 * AttoptCacheEntry takes hash value from the system cache. For
 	 * AttoptCacheHash we use the same hash in order to speedup search by hash
 	 * value. This is used by hash_seq_init_with_hash_value().
 	 */
-	ctl.hash = relatt_cache_syshash;
-
-	AttoptCacheHash =
-		hash_create("Attopt cache", 256, &ctl,
-					HASH_ELEM | HASH_FUNCTION);
+	AttoptCacheHash = hash_make_fn_cxt(AttoptCacheEntry, key,
+									   "Attopt cache", 256,
+									   relatt_cache_syshash, NULL,
+									   TopMemoryContext);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 3fe89c9c98f..b5b72a6a47e 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -77,7 +77,6 @@ EventCacheLookup(EventTriggerEvent event)
 static void
 BuildEventTriggerCache(void)
 {
-	HASHCTL		ctl;
 	HTAB	   *cache;
 	Relation	rel;
 	Relation	irel;
@@ -114,11 +113,9 @@ BuildEventTriggerCache(void)
 	EventTriggerCacheState = ETCS_REBUILD_STARTED;
 
 	/* Create new hash table. */
-	ctl.keysize = sizeof(EventTriggerEvent);
-	ctl.entrysize = sizeof(EventTriggerCacheEntry);
-	ctl.hcxt = EventTriggerCacheContext;
-	cache = hash_create("EventTriggerCacheHash", 32, &ctl,
-						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	cache = hash_make_cxt(EventTriggerCacheEntry, event,
+						  "EventTriggerCacheHash", 32,
+						  EventTriggerCacheContext);
 
 	/*
 	 * Prepare to scan pg_event_trigger in name order.
diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c
index 701c294b88d..bef938c37c0 100644
--- a/src/backend/utils/cache/funccache.c
+++ b/src/backend/utils/cache/funccache.c
@@ -58,19 +58,13 @@ static int	cfunc_match(const void *key1, const void *key2, Size keysize);
 static void
 cfunc_hashtable_init(void)
 {
-	HASHCTL		ctl;
-
 	/* don't allow double-initialization */
 	Assert(cfunc_hashtable == NULL);
 
-	ctl.keysize = sizeof(CachedFunctionHashKey);
-	ctl.entrysize = sizeof(CachedFunctionHashEntry);
-	ctl.hash = cfunc_hash;
-	ctl.match = cfunc_match;
-	cfunc_hashtable = hash_create("Cached function hash",
-								  FUNCS_PER_USER,
-								  &ctl,
-								  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+	cfunc_hashtable = hash_make_fn_cxt(CachedFunctionHashEntry, key,
+									   "Cached function hash", FUNCS_PER_USER,
+									   cfunc_hash, cfunc_match,
+									   TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index e19f0d3e51c..79a291324c0 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1668,17 +1668,14 @@ LookupOpclassInfo(Oid operatorClassOid,
 
 	if (OpClassCache == NULL)
 	{
-		/* First time through: initialize the opclass cache */
-		HASHCTL		ctl;
-
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
 			CreateCacheMemoryContext();
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(OpClassCacheEnt);
-		OpClassCache = hash_create("Operator class cache", 64,
-								   &ctl, HASH_ELEM | HASH_BLOBS);
+		/* First time through: initialize the opclass cache */
+		OpClassCache = hash_make_cxt(OpClassCacheEnt, opclassoid,
+									 "Operator class cache", 64,
+									 TopMemoryContext);
 	}
 
 	opcentry = (OpClassCacheEnt *) hash_search(OpClassCache,
@@ -3993,7 +3990,6 @@ RelationAssumeNewRelfilelocator(Relation relation)
 void
 RelationCacheInitialize(void)
 {
-	HASHCTL		ctl;
 	int			allocsize;
 
 	/*
@@ -4005,10 +4001,9 @@ RelationCacheInitialize(void)
 	/*
 	 * create hashtable that indexes the relcache
 	 */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(RelIdCacheEnt);
-	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
-								  &ctl, HASH_ELEM | HASH_BLOBS);
+	RelationIdCache = hash_make_cxt(RelIdCacheEnt, reloid,
+									"Relcache by OID", INITRELCACHESIZE,
+									TopMemoryContext);
 
 	/*
 	 * reserve enough in_progress_list slots for many cases
diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c
index 6f970fafa05..09b05dec9d0 100644
--- a/src/backend/utils/cache/relfilenumbermap.c
+++ b/src/backend/utils/cache/relfilenumbermap.c
@@ -85,7 +85,6 @@ RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
 static void
 InitializeRelfilenumberMap(void)
 {
-	HASHCTL		ctl;
 	int			i;
 
 	/* Make sure we've initialized CacheMemoryContext. */
@@ -113,13 +112,9 @@ InitializeRelfilenumberMap(void)
 	 * initialized when fmgr_info_cxt() above ERRORs out with an out of memory
 	 * error.
 	 */
-	ctl.keysize = sizeof(RelfilenumberMapKey);
-	ctl.entrysize = sizeof(RelfilenumberMapEntry);
-	ctl.hcxt = CacheMemoryContext;
-
 	RelfilenumberMapHash =
-		hash_create("RelfilenumberMap cache", 64, &ctl,
-					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		hash_make_cxt(RelfilenumberMapEntry, key,
+					  "RelfilenumberMap cache", 64, CacheMemoryContext);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(RelfilenumberMapInvalidateCallback,
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 362169b7d97..acff62a38be 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -27,6 +27,7 @@
 #include "utils/catcache.h"
 #include "utils/hsearch.h"
 #include "utils/inval.h"
+#include "utils/memutils.h"
 #include "utils/spccache.h"
 #include "utils/syscache.h"
 #include "varatt.h"
@@ -78,14 +79,10 @@ InvalidateTableSpaceCacheCallback(Datum arg, SysCacheIdentifier cacheid,
 static void
 InitializeTableSpaceCache(void)
 {
-	HASHCTL		ctl;
-
 	/* Initialize the hash table. */
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(TableSpaceCacheEntry);
 	TableSpaceCacheHash =
-		hash_create("TableSpace cache", 16, &ctl,
-					HASH_ELEM | HASH_BLOBS);
+		hash_make_cxt(TableSpaceCacheEntry, oid,
+					  "TableSpace cache", 16, TopMemoryContext);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 9e29f1386b0..71e57076002 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -118,12 +118,9 @@ lookup_ts_parser_cache(Oid prsId)
 	if (TSParserCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TSParserCacheEntry);
-		TSParserCacheHash = hash_create("Tsearch parser cache", 4,
-										&ctl, HASH_ELEM | HASH_BLOBS);
+		TSParserCacheHash = hash_make_cxt(TSParserCacheEntry, prsId,
+										  "Tsearch parser cache", 4,
+										  TopMemoryContext);
 		/* Flush cache on pg_ts_parser changes */
 		CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
 									  PointerGetDatum(TSParserCacheHash));
@@ -213,12 +210,9 @@ lookup_ts_dictionary_cache(Oid dictId)
 	if (TSDictionaryCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TSDictionaryCacheEntry);
-		TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8,
-											&ctl, HASH_ELEM | HASH_BLOBS);
+		TSDictionaryCacheHash = hash_make_cxt(TSDictionaryCacheEntry, dictId,
+											  "Tsearch dictionary cache", 8,
+											  TopMemoryContext);
 		/* Flush cache on pg_ts_dict and pg_ts_template changes */
 		CacheRegisterSyscacheCallback(TSDICTOID, InvalidateTSCacheCallBack,
 									  PointerGetDatum(TSDictionaryCacheHash));
@@ -364,12 +358,9 @@ lookup_ts_dictionary_cache(Oid dictId)
 static void
 init_ts_config_cache(void)
 {
-	HASHCTL		ctl;
-
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(TSConfigCacheEntry);
-	TSConfigCacheHash = hash_create("Tsearch configuration cache", 16,
-									&ctl, HASH_ELEM | HASH_BLOBS);
+	TSConfigCacheHash = hash_make_cxt(TSConfigCacheEntry, cfgId,
+									  "Tsearch configuration cache", 16,
+									  TopMemoryContext);
 	/* Flush cache on pg_ts_config and pg_ts_config_map changes */
 	CacheRegisterSyscacheCallback(TSCONFIGOID, InvalidateTSCacheCallBack,
 								  PointerGetDatum(TSConfigCacheHash));
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index cebe7a916fb..5e6650404f4 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -395,28 +395,23 @@ lookup_type_cache(Oid type_id, int flags)
 	if (TypeCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
 		int			allocsize;
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(TypeCacheEntry);
-
 		/*
 		 * TypeCacheEntry takes hash value from the system cache. For
 		 * TypeCacheHash we use the same hash in order to speedup search by
 		 * hash value. This is used by hash_seq_init_with_hash_value().
 		 */
-		ctl.hash = type_cache_syshash;
-
-		TypeCacheHash = hash_create("Type information cache", 64,
-									&ctl, HASH_ELEM | HASH_FUNCTION);
+		TypeCacheHash = hash_make_fn_cxt(TypeCacheEntry, type_id,
+										 "Type information cache", 64,
+										 type_cache_syshash, NULL,
+										 TopMemoryContext);
 
 		Assert(RelIdToTypeIdCacheHash == NULL);
 
-		ctl.keysize = sizeof(Oid);
-		ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
-		RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
-											 &ctl, HASH_ELEM | HASH_BLOBS);
+		RelIdToTypeIdCacheHash = hash_make_cxt(RelIdToTypeIdCacheEntry, relid,
+											   "Map from relid to OID of cached composite type",
+											   64, TopMemoryContext);
 
 		/* Also set up callbacks for SI invalidations */
 		CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
@@ -2076,15 +2071,11 @@ assign_record_type_typmod(TupleDesc tupDesc)
 	if (RecordCacheHash == NULL)
 	{
 		/* First time through: initialize the hash table */
-		HASHCTL		ctl;
-
-		ctl.keysize = sizeof(TupleDesc);	/* just the pointer */
-		ctl.entrysize = sizeof(RecordCacheEntry);
-		ctl.hash = record_type_typmod_hash;
-		ctl.match = record_type_typmod_compare;
-		RecordCacheHash = hash_create("Record information cache", 64,
-									  &ctl,
-									  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+		RecordCacheHash = hash_make_fn_cxt(RecordCacheEntry, tupdesc,
+										   "Record information cache", 64,
+										   record_type_typmod_hash,
+										   record_type_typmod_compare,
+										   TopMemoryContext);
 
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index e636cc81cf8..5d99467e2bd 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -26,6 +26,7 @@
 #include "storage/fd.h"
 #include "storage/shmem.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 
 /* signature for PostgreSQL-specific library init function */
@@ -671,14 +672,9 @@ find_rendezvous_variable(const char *varName)
 	/* Create a hashtable if we haven't already done so in this process */
 	if (rendezvousHash == NULL)
 	{
-		HASHCTL		ctl;
-
-		ctl.keysize = NAMEDATALEN;
-		ctl.entrysize = sizeof(rendezvousHashEntry);
-		rendezvousHash = hash_create("Rendezvous variable hash",
-									 16,
-									 &ctl,
-									 HASH_ELEM | HASH_STRINGS);
+		rendezvousHash = hash_make_cxt(rendezvousHashEntry, varName,
+									   "Rendezvous variable hash", 16,
+									   TopMemoryContext);
 	}
 
 	/* Find or create the hashtable entry for this varName */
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index bfeceb7a92f..db8c43d92ce 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -33,6 +33,7 @@
 #include "utils/guc.h"
 #include "utils/hsearch.h"
 #include "utils/lsyscache.h"
+#include "utils/memutils.h"
 #include "utils/syscache.h"
 
 /*
@@ -548,14 +549,8 @@ record_C_func(HeapTuple procedureTuple,
 	/* Create the hash table if it doesn't exist yet */
 	if (CFuncHash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(Oid);
-		hash_ctl.entrysize = sizeof(CFuncHashTabEntry);
-		CFuncHash = hash_create("CFuncHash",
-								100,
-								&hash_ctl,
-								HASH_ELEM | HASH_BLOBS);
+		CFuncHash = hash_make_cxt(CFuncHashTabEntry, fn_oid,
+								  "CFuncHash", 100, TopMemoryContext);
 	}
 
 	entry = (CFuncHashTabEntry *)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index e1546d9c97a..cd99d72e8a8 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -872,7 +872,6 @@ build_guc_variables(void)
 {
 	int			size_vars;
 	int			num_vars = 0;
-	HASHCTL		hash_ctl;
 	GUCHashEntry *hentry;
 	bool		found;
 
@@ -895,15 +894,10 @@ build_guc_variables(void)
 	 */
 	size_vars = num_vars + num_vars / 4;
 
-	hash_ctl.keysize = sizeof(char *);
-	hash_ctl.entrysize = sizeof(GUCHashEntry);
-	hash_ctl.hash = guc_name_hash;
-	hash_ctl.match = guc_name_match;
-	hash_ctl.hcxt = GUCMemoryContext;
-	guc_hashtab = hash_create("GUC hash table",
-							  size_vars,
-							  &hash_ctl,
-							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+	guc_hashtab = hash_make_fn_cxt(GUCHashEntry, gucname,
+								   "GUC hash table", size_vars,
+								   guc_name_hash, guc_name_match,
+								   GUCMemoryContext);
 
 	for (int i = 0; ConfigureNames[i].name; i++)
 	{
diff --git a/src/backend/utils/misc/injection_point.c b/src/backend/utils/misc/injection_point.c
index c06b0e9b800..a6cb46402aa 100644
--- a/src/backend/utils/misc/injection_point.c
+++ b/src/backend/utils/misc/injection_point.c
@@ -127,16 +127,10 @@ injection_point_cache_add(const char *name,
 	/* If first time, initialize */
 	if (InjectionPointCache == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = sizeof(char[INJ_NAME_MAXLEN]);
-		hash_ctl.entrysize = sizeof(InjectionPointCacheEntry);
-		hash_ctl.hcxt = TopMemoryContext;
-
-		InjectionPointCache = hash_create("InjectionPoint cache hash",
-										  MAX_INJECTION_POINTS,
-										  &hash_ctl,
-										  HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+		InjectionPointCache = hash_make_cxt(InjectionPointCacheEntry, name,
+											"InjectionPoint cache hash",
+											MAX_INJECTION_POINTS,
+											TopMemoryContext);
 	}
 
 	entry = (InjectionPointCacheEntry *)
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 493f9b0ee19..f8a6db5316a 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -105,23 +105,19 @@ static MemoryContext TopPortalContext = NULL;
 void
 EnablePortalManager(void)
 {
-	HASHCTL		ctl;
-
 	Assert(TopPortalContext == NULL);
 
 	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
 											 "TopPortalContext",
 											 ALLOCSET_DEFAULT_SIZES);
 
-	ctl.keysize = MAX_PORTALNAME_LEN;
-	ctl.entrysize = sizeof(PortalHashEnt);
-
 	/*
 	 * use PORTALS_PER_USER as a guess of how many hash table entries to
 	 * create, initially
 	 */
-	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
-								  &ctl, HASH_ELEM | HASH_STRINGS);
+	PortalHashTable = hash_make_cxt(PortalHashEnt, portalname,
+									"Portal hash", PORTALS_PER_USER,
+									TopMemoryContext);
 }
 
 /*
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 614b7c1006b..4c31675535a 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -214,8 +214,6 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 	 */
 	if (comboHash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
 		/* Make array first; existence of hash table asserts array exists */
 		comboCids = (ComboCidKeyData *)
 			MemoryContextAlloc(TopTransactionContext,
@@ -223,14 +221,9 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 		sizeComboCids = CCID_ARRAY_SIZE;
 		usedComboCids = 0;
 
-		hash_ctl.keysize = sizeof(ComboCidKeyData);
-		hash_ctl.entrysize = sizeof(ComboCidEntryData);
-		hash_ctl.hcxt = TopTransactionContext;
-
-		comboHash = hash_create("Combo CIDs",
-								CCID_HASH_SIZE,
-								&hash_ctl,
-								HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		comboHash = hash_make_cxt(ComboCidEntryData, key,
+								  "Combo CIDs", CCID_HASH_SIZE,
+								  TopTransactionContext);
 	}
 
 	/*
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 06ebffa111c..bc5bd5fa051 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -391,7 +391,6 @@ _PG_init(void)
 	 * "plperl.use_strict"
 	 */
 	static bool inited = false;
-	HASHCTL		hash_ctl;
 
 	if (inited)
 		return;
@@ -461,19 +460,13 @@ _PG_init(void)
 	/*
 	 * Create hash tables.
 	 */
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(plperl_interp_desc);
-	plperl_interp_hash = hash_create("PL/Perl interpreters",
-									 8,
-									 &hash_ctl,
-									 HASH_ELEM | HASH_BLOBS);
-
-	hash_ctl.keysize = sizeof(plperl_proc_key);
-	hash_ctl.entrysize = sizeof(plperl_proc_ptr);
-	plperl_proc_hash = hash_create("PL/Perl procedures",
-								   32,
-								   &hash_ctl,
-								   HASH_ELEM | HASH_BLOBS);
+	plperl_interp_hash = hash_make_cxt(plperl_interp_desc, user_id,
+									   "PL/Perl interpreters", 8,
+									   TopMemoryContext);
+
+	plperl_proc_hash = hash_make_cxt(plperl_proc_ptr, proc_key,
+									 "PL/Perl procedures", 32,
+									 TopMemoryContext);
 
 	/*
 	 * Save the default opmask.
@@ -579,14 +572,9 @@ select_perl_context(bool trusted)
 	/* Make sure we have a query_hash for this interpreter */
 	if (interp_desc->query_hash == NULL)
 	{
-		HASHCTL		hash_ctl;
-
-		hash_ctl.keysize = NAMEDATALEN;
-		hash_ctl.entrysize = sizeof(plperl_query_entry);
-		interp_desc->query_hash = hash_create("PL/Perl queries",
-											  32,
-											  &hash_ctl,
-											  HASH_ELEM | HASH_STRINGS);
+		interp_desc->query_hash = hash_make_cxt(plperl_query_entry, query_name,
+												"PL/Perl queries", 32,
+												TopMemoryContext);
 	}
 
 	/*
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 65b0fd0790f..579eece4c6f 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -4015,8 +4015,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 					 EState *simple_eval_estate,
 					 ResourceOwner simple_eval_resowner)
 {
-	HASHCTL		ctl;
-
 	/* this link will be restored at exit from plpgsql_call_handler */
 	func->cur_estate = estate;
 
@@ -4071,12 +4069,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 	/* Create the session-wide cast-expression hash if we didn't already */
 	if (cast_expr_hash == NULL)
 	{
-		ctl.keysize = sizeof(plpgsql_CastHashKey);
-		ctl.entrysize = sizeof(plpgsql_CastExprHashEntry);
-		cast_expr_hash = hash_create("PLpgSQL cast expressions",
-									 16,	/* start small and extend */
-									 &ctl,
-									 HASH_ELEM | HASH_BLOBS);
+		cast_expr_hash = hash_make_cxt(plpgsql_CastExprHashEntry, key,
+									   "PLpgSQL cast expressions",
+									   16,	/* start small and extend */
+									   TopMemoryContext);
 	}
 
 	/* set up for use of appropriate simple-expression EState and cast hash */
@@ -4084,13 +4080,9 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 	{
 		estate->simple_eval_estate = simple_eval_estate;
 		/* Private cast hash just lives in function's main context */
-		ctl.keysize = sizeof(plpgsql_CastHashKey);
-		ctl.entrysize = sizeof(plpgsql_CastHashEntry);
-		ctl.hcxt = CurrentMemoryContext;
-		estate->cast_hash = hash_create("PLpgSQL private cast cache",
-										16, /* start small and extend */
-										&ctl,
-										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+		estate->cast_hash = hash_make(plpgsql_CastHashEntry, key,
+									  "PLpgSQL private cast cache",
+									  16);	/* start small and extend */
 	}
 	else
 	{
@@ -4098,12 +4090,10 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 		/* Create the session-wide cast-info hash table if we didn't already */
 		if (shared_cast_hash == NULL)
 		{
-			ctl.keysize = sizeof(plpgsql_CastHashKey);
-			ctl.entrysize = sizeof(plpgsql_CastHashEntry);
-			shared_cast_hash = hash_create("PLpgSQL cast cache",
-										   16,	/* start small and extend */
-										   &ctl,
-										   HASH_ELEM | HASH_BLOBS);
+			shared_cast_hash = hash_make_cxt(plpgsql_CastHashEntry, key,
+											 "PLpgSQL cast cache",
+											 16,	/* start small and extend */
+											 TopMemoryContext);
 		}
 		estate->cast_hash = shared_cast_hash;
 	}
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
index 72806c17e17..71a76b85ec8 100644
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -16,6 +16,7 @@
 #include "plpy_subxactobject.h"
 #include "plpy_util.h"
 #include "utils/builtins.h"
+#include "utils/memutils.h"
 
 HTAB	   *PLy_spi_exceptions = NULL;
 
@@ -145,7 +146,6 @@ static void
 PLy_add_exceptions(PyObject *plpy)
 {
 	PyObject   *excmod;
-	HASHCTL		hash_ctl;
 
 	PLy_exc_error = PLy_create_exception("plpy.Error", NULL, NULL,
 										 "Error", plpy);
@@ -158,10 +158,9 @@ PLy_add_exceptions(PyObject *plpy)
 	if (excmod == NULL)
 		PLy_elog(ERROR, "could not create the spiexceptions module");
 
-	hash_ctl.keysize = sizeof(int);
-	hash_ctl.entrysize = sizeof(PLyExceptionEntry);
-	PLy_spi_exceptions = hash_create("PL/Python SPI exceptions", 256,
-									 &hash_ctl, HASH_ELEM | HASH_BLOBS);
+	PLy_spi_exceptions = hash_make_cxt(PLyExceptionEntry, sqlstate,
+									   "PL/Python SPI exceptions", 256,
+									   TopMemoryContext);
 
 	PLy_generate_spi_exceptions(excmod, PLy_exc_spi_error);
 
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index 750ba586e0c..fc728f7ab8a 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -29,12 +29,9 @@ static char *PLy_procedure_munge_source(const char *name, const char *src);
 void
 init_procedure_caches(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = sizeof(PLyProcedureKey);
-	hash_ctl.entrysize = sizeof(PLyProcedureEntry);
-	PLy_procedure_cache = hash_create("PL/Python procedures", 32, &hash_ctl,
-									  HASH_ELEM | HASH_BLOBS);
+	PLy_procedure_cache = hash_make_cxt(PLyProcedureEntry, key,
+										"PL/Python procedures", 32,
+										TopMemoryContext);
 }
 
 /*
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 85e83bbf1e3..8d5e8edbfe0 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -410,7 +410,6 @@ void
 _PG_init(void)
 {
 	Tcl_NotifierProcs notifier;
-	HASHCTL		hash_ctl;
 
 	/* Be sure we do initialization only once (should be redundant now) */
 	if (pltcl_pm_init_done)
@@ -448,22 +447,16 @@ _PG_init(void)
 	/************************************************************
 	 * Create the hash table for working interpreters
 	 ************************************************************/
-	hash_ctl.keysize = sizeof(Oid);
-	hash_ctl.entrysize = sizeof(pltcl_interp_desc);
-	pltcl_interp_htab = hash_create("PL/Tcl interpreters",
-									8,
-									&hash_ctl,
-									HASH_ELEM | HASH_BLOBS);
+	pltcl_interp_htab = hash_make_cxt(pltcl_interp_desc, user_id,
+									  "PL/Tcl interpreters", 8,
+									  TopMemoryContext);
 
 	/************************************************************
 	 * Create the hash table for function lookup
 	 ************************************************************/
-	hash_ctl.keysize = sizeof(pltcl_proc_key);
-	hash_ctl.entrysize = sizeof(pltcl_proc_ptr);
-	pltcl_proc_htab = hash_create("PL/Tcl functions",
-								  100,
-								  &hash_ctl,
-								  HASH_ELEM | HASH_BLOBS);
+	pltcl_proc_htab = hash_make_cxt(pltcl_proc_ptr, proc_key,
+									"PL/Tcl functions", 100,
+									TopMemoryContext);
 
 	/************************************************************
 	 * Define PL/Tcl's custom GUCs
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index eac988c21e7..c68de93b535 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -22,6 +22,7 @@
 #include "pgtz.h"
 #include "storage/fd.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 
 
 /* Current session timezone (controlled by TimeZone GUC) */
@@ -201,15 +202,8 @@ static HTAB *timezone_cache = NULL;
 static bool
 init_timezone_hashtable(void)
 {
-	HASHCTL		hash_ctl;
-
-	hash_ctl.keysize = TZ_STRLEN_MAX + 1;
-	hash_ctl.entrysize = sizeof(pg_tz_cache);
-
-	timezone_cache = hash_create("Timezones",
-								 4,
-								 &hash_ctl,
-								 HASH_ELEM | HASH_STRINGS);
+	timezone_cache = hash_make_cxt(pg_tz_cache, tznameupper,
+								   "Timezones", 4, TopMemoryContext);
 	if (!timezone_cache)
 		return false;
 
-- 
2.53.0

