From 68cad956d293bb51c3591022fe7e349282e5c1f3 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <postgres@jeltef.nl>
Date: Thu, 4 Dec 2025 15:39:09 +0100
Subject: [PATCH v13 4/5] Use foreach_hash macro throughout the codebase

This starts using the new foreach_hash macro throughout the codebase.
This makes the code easier to read, but it does introduce backpatching
problems. We could choose not to do this refactor to avoid that.
Alternatively, we could do the refactor and then backpatch the new
macros themselves, so they can also be used in backpatched code.

At the very least we should use the new macros in a few selected places,
to make sure they have test coverage.
---
 contrib/dblink/dblink.c                       |  5 +-
 .../pg_stat_statements/pg_stat_statements.c   | 39 +++------
 contrib/pg_trgm/trgm_regexp.c                 | 18 +---
 contrib/postgres_fdw/connection.c             | 26 ++----
 contrib/postgres_fdw/shippable.c              |  6 +-
 src/backend/access/heap/rewriteheap.c         | 18 +---
 src/backend/access/transam/xlogutils.c        | 20 +----
 src/backend/catalog/pg_enum.c                 | 16 ++--
 src/backend/catalog/storage.c                 | 18 ++--
 src/backend/commands/async.c                  | 21 ++---
 src/backend/commands/prepare.c                | 12 +--
 src/backend/commands/tablecmds.c              |  7 +-
 src/backend/optimizer/util/predtest.c         |  7 +-
 src/backend/parser/parse_oper.c               |  7 +-
 src/backend/partitioning/partdesc.c           |  6 +-
 src/backend/postmaster/autovacuum.c           |  7 +-
 src/backend/replication/logical/relation.c    | 37 ++------
 .../replication/logical/reorderbuffer.c       | 12 +--
 src/backend/replication/pgoutput/pgoutput.c   | 24 ++---
 src/backend/storage/ipc/shmem.c               | 12 +--
 src/backend/storage/ipc/standby.c             | 12 +--
 src/backend/storage/lmgr/lock.c               | 67 +++-----------
 src/backend/storage/lmgr/lwlock.c             |  7 +-
 src/backend/storage/lmgr/predicate.c          | 18 +---
 src/backend/storage/smgr/smgr.c               |  7 +-
 src/backend/storage/sync/sync.c               | 17 ++--
 src/backend/tsearch/ts_typanalyze.c           | 11 +--
 src/backend/utils/activity/wait_event.c       |  6 +-
 src/backend/utils/adt/array_typanalyze.c      | 17 ++--
 src/backend/utils/cache/relcache.c            | 67 +++++---------
 src/backend/utils/cache/relfilenumbermap.c    |  6 +-
 src/backend/utils/cache/spccache.c            |  6 +-
 src/backend/utils/cache/ts_cache.c            |  5 +-
 src/backend/utils/cache/typcache.c            | 14 +--
 src/backend/utils/misc/guc.c                  | 30 ++-----
 src/backend/utils/mmgr/portalmem.c            | 87 ++++---------------
 src/pl/plperl/plperl.c                        |  6 +-
 37 files changed, 154 insertions(+), 547 deletions(-)

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 53a3a090d34..d03669d163e 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1279,14 +1279,11 @@ PG_FUNCTION_INFO_V1(dblink_get_connections);
 Datum
 dblink_get_connections(PG_FUNCTION_ARGS)
 {
-	HASH_SEQ_STATUS status;
-	remoteConnHashEnt *hentry;
 	ArrayBuildState *astate = NULL;
 
 	if (remoteConnHash)
 	{
-		hash_seq_init(&status, remoteConnHash);
-		while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(remoteConnHashEnt, hentry, remoteConnHash)
 		{
 			/* ignore it if it's not an open connection */
 			if (hentry->rconn.conn == NULL)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 2af828a9991..96621af5871 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -742,9 +742,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	FILE	   *file;
 	char	   *qbuffer = NULL;
 	Size		qbuffer_size = 0;
-	HASH_SEQ_STATUS hash_seq;
 	int32		num_entries;
-	pgssEntry  *entry;
 
 	/* Don't try to dump during a crash. */
 	if (code)
@@ -778,8 +776,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 	 * When serializing to disk, we store query texts immediately after their
 	 * entry data.  Any orphaned query texts are thereby excluded.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			len = entry->query_len;
 		char	   *qstr = qtext_fetch(entry->query_offset, len,
@@ -791,8 +788,8 @@ pgss_shmem_shutdown(int code, Datum arg)
 		if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
 			fwrite(qstr, 1, len + 1, file) != len + 1)
 		{
-			/* note: we assume hash_seq_term won't change errno */
-			hash_seq_term(&hash_seq);
+			/* note: we assume foreach_hash_term won't change errno */
+			foreach_hash_term(entry);
 			goto error;
 		}
 	}
@@ -1697,8 +1694,6 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 	Size		qbuffer_size = 0;
 	Size		extent = 0;
 	int			gc_count = 0;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 
 	/*
 	 * Superusers or roles with the privileges of pg_read_all_stats members
@@ -1828,8 +1823,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 		}
 	}
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		Datum		values[PG_STAT_STATEMENTS_COLS];
 		bool		nulls[PG_STAT_STATEMENTS_COLS];
@@ -2174,9 +2168,7 @@ entry_cmp(const void *lhs, const void *rhs)
 static void
 entry_dealloc(void)
 {
-	HASH_SEQ_STATUS hash_seq;
 	pgssEntry **entries;
-	pgssEntry  *entry;
 	int			nvictims;
 	int			i;
 	Size		tottextlen;
@@ -2200,8 +2192,7 @@ entry_dealloc(void)
 	tottextlen = 0;
 	nvalidtexts = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entries[i++] = entry;
 		/* "Sticky" entries get a different usage decay rate. */
@@ -2513,8 +2504,6 @@ gc_qtexts(void)
 	char	   *qbuffer;
 	Size		qbuffer_size;
 	FILE	   *qfile = NULL;
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	Size		extent;
 	int			nentries;
 
@@ -2556,8 +2545,7 @@ gc_qtexts(void)
 	extent = 0;
 	nentries = 0;
 
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		int			query_len = entry->query_len;
 		char	   *qry = qtext_fetch(entry->query_offset,
@@ -2580,7 +2568,7 @@ gc_qtexts(void)
 					(errcode_for_file_access(),
 					 errmsg("could not write file \"%s\": %m",
 							PGSS_TEXT_FILE)));
-			hash_seq_term(&hash_seq);
+			foreach_hash_term(entry);
 			goto gc_fail;
 		}
 
@@ -2648,8 +2636,7 @@ gc_fail:
 	 * Since the contents of the external file are now uncertain, mark all
 	 * hashtable entries as having invalid texts.
 	 */
-	hash_seq_init(&hash_seq, pgss_hash);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(pgssEntry, entry, pgss_hash)
 	{
 		entry->query_offset = 0;
 		entry->query_len = -1;
@@ -2713,8 +2700,6 @@ if (e) { \
 static TimestampTz
 entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 {
-	HASH_SEQ_STATUS hash_seq;
-	pgssEntry  *entry;
 	FILE	   *qfile;
 	int64		num_entries;
 	int64		num_remove = 0;
@@ -2734,6 +2719,8 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
 	{
 		/* If all the parameters are available, use the fast path. */
+		pgssEntry  *entry;
+
 		memset(&key, 0, sizeof(pgssHashKey));
 		key.userid = userid;
 		key.dbid = dbid;
@@ -2757,8 +2744,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
 	{
 		/* Reset entries corresponding to valid parameters. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			if ((!userid || entry->key.userid == userid) &&
 				(!dbid || entry->key.dbid == dbid) &&
@@ -2771,8 +2757,7 @@ entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
 	else
 	{
 		/* Reset all entries. */
-		hash_seq_init(&hash_seq, pgss_hash);
-		while ((entry = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(pgssEntry, entry, pgss_hash)
 		{
 			SINGLE_ENTRY_RESET(entry);
 		}
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
index 383ce6b31b1..26403f8b02a 100644
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -1452,10 +1452,8 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2)
 static bool
 selectColorTrigrams(TrgmNFA *trgmNFA)
 {
-	HASH_SEQ_STATUS scan_status;
 	int			arcsCount = trgmNFA->arcsCount,
 				i;
-	TrgmState  *state;
 	ColorTrgmInfo *colorTrgms;
 	int64		totalTrgmCount;
 	float4		totalTrgmPenalty;
@@ -1466,8 +1464,7 @@ selectColorTrigrams(TrgmNFA *trgmNFA)
 	trgmNFA->colorTrgms = colorTrgms;
 
 	i = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
@@ -1929,8 +1926,6 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	int			snumber = 2,
 				arcIndex,
 				arcsCount;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmPackArcInfo *arcs;
 	TrgmPackedArc *packedArcs;
 	TrgmPackedGraph *result;
@@ -1938,8 +1933,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 				j;
 
 	/* Enumerate surviving states, giving init and fin reserved numbers */
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		while (state->parent)
 			state = state->parent;
@@ -1961,8 +1955,7 @@ packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
 	/* Collect array of all arcs */
 	arcs = palloc_array(TrgmPackArcInfo, trgmNFA->arcsCount);
 	arcIndex = 0;
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		TrgmState  *source = state;
 		ListCell   *cell;
@@ -2201,16 +2194,13 @@ static void
 printTrgmNFA(TrgmNFA *trgmNFA)
 {
 	StringInfoData buf;
-	HASH_SEQ_STATUS scan_status;
-	TrgmState  *state;
 	TrgmState  *initstate = NULL;
 
 	initStringInfo(&buf);
 
 	appendStringInfoString(&buf, "\ndigraph transformedNFA {\n");
 
-	hash_seq_init(&scan_status, trgmNFA->states);
-	while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrgmState, state, trgmNFA->states)
 	{
 		ListCell   *cell;
 
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 0faf7e69280..8fba2e23603 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -1066,8 +1066,6 @@ pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
 static void
 pgfdw_xact_callback(XactEvent event, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
 
@@ -1079,8 +1077,7 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 	 * Scan all connection cache entries to find open remote transactions, and
 	 * close them.
 	 */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		PGresult   *res;
 
@@ -1217,8 +1214,6 @@ static void
 pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 					   SubTransactionId parentSubid, void *arg)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	int			curlevel;
 	List	   *pending_entries = NIL;
 	List	   *cancel_requested = NIL;
@@ -1237,8 +1232,7 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 	 * of the current level, and close them.
 	 */
 	curlevel = GetCurrentTransactionNestLevel();
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		char		sql[100];
 
@@ -1329,14 +1323,10 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 static void
 pgfdw_inval_callback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
-
 	Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID);
 
 	/* ConnectionHash must exist already, if we're registered */
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore invalid entries */
 		if (entry->conn == NULL)
@@ -2187,8 +2177,6 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 									  enum pgfdwVersion api_version)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 
 	InitMaterializedSRF(fcinfo, 0);
 
@@ -2211,8 +2199,7 @@ postgres_fdw_get_connections_internal(FunctionCallInfo fcinfo,
 			elog(ERROR, "incorrect number of output arguments");
 	}
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		ForeignServer *server;
 		Datum		values[POSTGRES_FDW_GET_CONNECTIONS_COLS] = {0};
@@ -2464,8 +2451,6 @@ postgres_fdw_disconnect_all(PG_FUNCTION_ARGS)
 static bool
 disconnect_cached_connections(Oid serverid)
 {
-	HASH_SEQ_STATUS scan;
-	ConnCacheEntry *entry;
 	bool		all = !OidIsValid(serverid);
 	bool		result = false;
 
@@ -2476,8 +2461,7 @@ disconnect_cached_connections(Oid serverid)
 	if (!ConnectionHash)
 		return false;
 
-	hash_seq_init(&scan, ConnectionHash);
-	while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+	foreach_hash(ConnCacheEntry, entry, ConnectionHash)
 	{
 		/* Ignore cache entry if no open connection right now. */
 		if (!entry->conn)
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index cba1c0967a9..014c537115a 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -66,17 +66,13 @@ static void
 InvalidateShippableCacheCallback(Datum arg, SysCacheIdentifier cacheid,
 								 uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	ShippableCacheEntry *entry;
-
 	/*
 	 * In principle we could flush only cache entries relating to the
 	 * pg_foreign_server entry being outdated; but that would be more
 	 * complicated, and it's probably not worth the trouble.  So for now, just
 	 * flush all entries.
 	 */
-	hash_seq_init(&status, ShippableCacheHash);
-	while ((entry = (ShippableCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(ShippableCacheEntry, entry, ShippableCacheHash)
 	{
 		if (hash_search(ShippableCacheHash,
 						&entry->key,
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index f58b4b2b205..bc509082163 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -291,16 +291,11 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 void
 end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	UnresolvedTup unresolved;
-
 	/*
 	 * Write any remaining tuples in the UnresolvedTups table. If we have any
 	 * left, they should in fact be dead, but let's err on the safe side.
 	 */
-	hash_seq_init(&seq_status, state->rs_unresolved_tups);
-
-	while ((unresolved = hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(UnresolvedTupData, unresolved, state->rs_unresolved_tups)
 	{
 		ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
 		raw_heap_insert(state, unresolved->tuple);
@@ -798,8 +793,6 @@ logical_begin_heap_rewrite(RewriteState state)
 static void
 logical_heap_rewrite_flush_mappings(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
 	dlist_mutable_iter iter;
 
 	Assert(state->rs_logical_rewrite);
@@ -811,8 +804,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 	elog(DEBUG1, "flushing %u logical rewrite mapping entries",
 		 state->rs_num_rewrite_mappings);
 
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		char	   *waldata;
 		char	   *waldata_start;
@@ -896,9 +888,6 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
 static void
 logical_end_heap_rewrite(RewriteState state)
 {
-	HASH_SEQ_STATUS seq_status;
-	RewriteMappingFile *src;
-
 	/* done, no logical rewrite in progress */
 	if (!state->rs_logical_rewrite)
 		return;
@@ -908,8 +897,7 @@ logical_end_heap_rewrite(RewriteState state)
 		logical_heap_rewrite_flush_mappings(state);
 
 	/* Iterate over all mappings we have written and fsync the files. */
-	hash_seq_init(&seq_status, state->rs_logical_mappings);
-	while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+	foreach_hash(RewriteMappingFile, src, state->rs_logical_mappings)
 	{
 		if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
 			ereport(data_sync_elevel(ERROR),
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index d11e42c9490..3c7fc65b8d4 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -160,15 +160,10 @@ static void
 forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 					 BlockNumber minblkno)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (RelFileLocatorEquals(hentry->key.locator, locator) &&
 			hentry->key.forkno == forkno &&
@@ -190,15 +185,10 @@ forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
 static void
 forget_invalid_pages_db(Oid dbid)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
-
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		if (hentry->key.locator.dbOid == dbid)
 		{
@@ -228,20 +218,16 @@ XLogHaveInvalidPages(void)
 void
 XLogCheckInvalidPages(void)
 {
-	HASH_SEQ_STATUS status;
-	xl_invalid_page *hentry;
 	bool		foundone = false;
 
 	if (invalid_page_tab == NULL)
 		return;					/* nothing to do */
 
-	hash_seq_init(&status, invalid_page_tab);
-
 	/*
 	 * Our strategy is to emit WARNING messages for all remaining entries and
 	 * only PANIC after we've dumped all the available info.
 	 */
-	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
+	foreach_hash(xl_invalid_page, hentry, invalid_page_tab)
 	{
 		report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
 							hentry->key.blkno, hentry->present);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 86c8bada557..be4a8ecd9e6 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -838,12 +838,10 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the types hash table, if there is one. */
 	if (uncommitted_enum_types)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_types);
-		while ((value = (Oid *) hash_seq_search(&status)))
+		foreach_hash(Oid, value, uncommitted_enum_types)
+		{
 			*serialized++ = *value;
+		}
 	}
 
 	/* Write out the terminator. */
@@ -852,12 +850,10 @@ SerializeUncommittedEnums(void *space, Size size)
 	/* Write out all the OIDs from the values hash table, if there is one. */
 	if (uncommitted_enum_values)
 	{
-		HASH_SEQ_STATUS status;
-		Oid		   *value;
-
-		hash_seq_init(&status, uncommitted_enum_values);
-		while ((value = (Oid *) hash_seq_search(&status)))
+		foreach_hash(Oid, value, uncommitted_enum_values)
+		{
 			*serialized++ = *value;
+		}
 	}
 
 	/* Write out the terminator. */
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index db3e08319b5..27d21b556be 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -594,10 +594,7 @@ void
 SerializePendingSyncs(Size maxSize, char *startAddress)
 {
 	HTAB	   *tmphash;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *sync;
 	PendingRelDelete *delete;
-	RelFileLocator *src;
 	RelFileLocator *dest = (RelFileLocator *) startAddress;
 
 	if (!pendingSyncHash)
@@ -608,9 +605,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 						   hash_get_num_entries(pendingSyncHash));
 
 	/* collect all rlocator from pending syncs */
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, sync, pendingSyncHash)
+	{
 		(void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL);
+	}
 
 	/* remove deleted rnodes */
 	for (delete = pendingDeletes; delete != NULL; delete = delete->next)
@@ -618,9 +616,10 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
 			(void) hash_search(tmphash, &delete->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, tmphash);
-	while ((src = (RelFileLocator *) hash_seq_search(&scan)))
+	foreach_hash(RelFileLocator, src, tmphash)
+	{
 		*dest++ = *src;
+	}
 
 	hash_destroy(tmphash);
 
@@ -733,8 +732,6 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 	int			nrels = 0,
 				maxrels = 0;
 	SMgrRelation *srels = NULL;
-	HASH_SEQ_STATUS scan;
-	PendingRelSync *pendingsync;
 
 	Assert(GetCurrentTransactionNestLevel() == 1);
 
@@ -763,8 +760,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
 			(void) hash_search(pendingSyncHash, &pending->rlocator,
 							   HASH_REMOVE, NULL);
 
-	hash_seq_init(&scan, pendingSyncHash);
-	while ((pendingsync = (PendingRelSync *) hash_seq_search(&scan)))
+	foreach_hash(PendingRelSync, pendingsync, pendingSyncHash)
 	{
 		ForkNumber	fork;
 		BlockNumber nblocks[MAX_FORKNUM + 1];
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 0c21f528498..1e16afa0bab 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -1229,14 +1229,10 @@ PreCommit_Notify(void)
 		pendingNotifies->uniqueChannelNames = NIL;
 		if (pendingNotifies->uniqueChannelHash != NULL)
 		{
-			HASH_SEQ_STATUS status;
-			ChannelName *channelEntry;
-
-			hash_seq_init(&status, pendingNotifies->uniqueChannelHash);
-			while ((channelEntry = (ChannelName *) hash_seq_search(&status)) != NULL)
+			foreach_hash(ChannelName, channelEntry, pendingNotifies->uniqueChannelHash)
 				pendingNotifies->uniqueChannelNames =
-					lappend(pendingNotifies->uniqueChannelNames,
-							channelEntry->channel);
+				lappend(pendingNotifies->uniqueChannelNames,
+						channelEntry->channel);
 		}
 		else
 		{
@@ -1653,8 +1649,6 @@ PrepareTableEntriesForUnlisten(const char *channel)
 static void
 PrepareTableEntriesForUnlistenAll(void)
 {
-	HASH_SEQ_STATUS seq;
-	ChannelName *channelEntry;
 	PendingListenEntry *pending;
 
 	/*
@@ -1662,8 +1656,7 @@ PrepareTableEntriesForUnlistenAll(void)
 	 * we are listening on or have prepared to listen on.  Record an UNLISTEN
 	 * action for each one, overwriting any earlier attempt to LISTEN.
 	 */
-	hash_seq_init(&seq, localChannelTable);
-	while ((channelEntry = (ChannelName *) hash_seq_search(&seq)) != NULL)
+	foreach_hash(ChannelName, channelEntry, localChannelTable)
 	{
 		pending = (PendingListenEntry *)
 			hash_search(pendingListenActions, channelEntry->channel, HASH_ENTER, NULL);
@@ -1710,9 +1703,6 @@ RemoveListenerFromChannel(GlobalChannelEntry **entry_ptr,
 static void
 ApplyPendingListenActions(bool isCommit)
 {
-	HASH_SEQ_STATUS seq;
-	PendingListenEntry *pending;
-
 	/* Quick exit if nothing to do */
 	if (pendingListenActions == NULL)
 		return;
@@ -1722,8 +1712,7 @@ ApplyPendingListenActions(bool isCommit)
 		elog(PANIC, "global channel table missing post-commit/abort");
 
 	/* For each staged action ... */
-	hash_seq_init(&seq, pendingListenActions);
-	while ((pending = (PendingListenEntry *) hash_seq_search(&seq)) != NULL)
+	foreach_hash(PendingListenEntry, pending, pendingListenActions)
 	{
 		GlobalChannelKey key;
 		GlobalChannelEntry *entry;
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 0ab08cceb9c..968cea8c926 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -535,16 +535,12 @@ DropPreparedStatement(const char *stmt_name, bool showError)
 void
 DropAllPreparedStatements(void)
 {
-	HASH_SEQ_STATUS seq;
-	PreparedStatement *entry;
-
 	/* nothing cached */
 	if (!prepared_queries)
 		return;
 
 	/* walk over cache */
-	hash_seq_init(&seq, prepared_queries);
-	while ((entry = hash_seq_search(&seq)) != NULL)
+	foreach_hash(PreparedStatement, entry, prepared_queries)
 	{
 		/* Release the plancache entry */
 		DropCachedPlan(entry->plansource);
@@ -691,11 +687,7 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
 	/* hash table might be uninitialized */
 	if (prepared_queries)
 	{
-		HASH_SEQ_STATUS hash_seq;
-		PreparedStatement *prep_stmt;
-
-		hash_seq_init(&hash_seq, prepared_queries);
-		while ((prep_stmt = hash_seq_search(&hash_seq)) != NULL)
+		foreach_hash(PreparedStatement, prep_stmt, prepared_queries)
 		{
 			TupleDesc	result_desc;
 			Datum		values[8];
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index cb7e00ad03f..f4bc02bc4e8 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2275,14 +2275,9 @@ ExecuteTruncateGuts(List *explicit_rels,
 	/* Now go through the hash table, and truncate foreign tables */
 	if (ft_htab)
 	{
-		ForeignTruncateInfo *ft_info;
-		HASH_SEQ_STATUS seq;
-
-		hash_seq_init(&seq, ft_htab);
-
 		PG_TRY();
 		{
-			while ((ft_info = hash_seq_search(&seq)) != NULL)
+			foreach_hash(ForeignTruncateInfo, ft_info, ft_htab)
 			{
 				FdwRoutine *routine = GetFdwRoutineByServerId(ft_info->serverid);
 
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 0cf77394abc..d3af50b4fa4 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -2345,15 +2345,10 @@ static void
 InvalidateOprProofCacheCallBack(Datum arg, SysCacheIdentifier cacheid,
 								uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprProofCacheEntry *hentry;
-
 	Assert(OprProofCacheHash != NULL);
 
 	/* Currently we just reset all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprProofCacheHash);
-
-	while ((hentry = (OprProofCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprProofCacheEntry, hentry, OprProofCacheHash)
 	{
 		hentry->have_implic = false;
 		hentry->have_refute = false;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index d1dd342d940..5ff6a66152e 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -1079,15 +1079,10 @@ static void
 InvalidateOprCacheCallBack(Datum arg, SysCacheIdentifier cacheid,
 						   uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	OprCacheEntry *hentry;
-
 	Assert(OprCacheHash != NULL);
 
 	/* Currently we just flush all entries; hard to be smarter ... */
-	hash_seq_init(&status, OprCacheHash);
-
-	while ((hentry = (OprCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(OprCacheEntry, hentry, OprCacheHash)
 	{
 		if (hash_search(OprCacheHash,
 						&hentry->key,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index db26e4a82b6..3ecc938723b 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -478,11 +478,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
 void
 DestroyPartitionDirectory(PartitionDirectory pdir)
 {
-	HASH_SEQ_STATUS status;
-	PartitionDirectoryEntry *pde;
-
-	hash_seq_init(&status, pdir->pdir_hash);
-	while ((pde = hash_seq_search(&status)) != NULL)
+	foreach_hash(PartitionDirectoryEntry, pde, pdir->pdir_hash)
 		RelationDecrementReferenceCount(pde->rel);
 }
 
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 4cbb7ebf18c..0ddc21c09ad 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1048,8 +1048,6 @@ rebuild_database_list(Oid newdb)
 		TimestampTz current_time;
 		int			millis_increment;
 		avl_dbase  *dbary;
-		avl_dbase  *db;
-		HASH_SEQ_STATUS seq;
 		int			i;
 
 		/* put all the hash elements into an array */
@@ -1060,8 +1058,7 @@ rebuild_database_list(Oid newdb)
 #endif
 
 		i = 0;
-		hash_seq_init(&seq, dbhash);
-		while ((db = hash_seq_search(&seq)) != NULL)
+		foreach_hash(avl_dbase, db, dbhash)
 			memcpy(&(dbary[i++]), db, sizeof(avl_dbase));
 
 		/* sort the array */
@@ -1086,7 +1083,7 @@ rebuild_database_list(Oid newdb)
 		 */
 		for (i = 0; i < nelems; i++)
 		{
-			db = &(dbary[i]);
+			avl_dbase  *db = &(dbary[i]);
 
 			current_time = TimestampTzPlusMilliseconds(current_time,
 													   millis_increment);
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index fcf295f1df1..fef9c357a3f 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -64,25 +64,19 @@ static Oid	FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remot
 static void
 logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepRelMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepRelMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 		{
 			if (entry->localreloid == reloid)
 			{
 				entry->localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -90,11 +84,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepRelMap);
-
-		while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepRelMapEntry, entry, LogicalRepRelMap)
 			entry->localrelvalid = false;
 	}
 }
@@ -531,25 +521,19 @@ logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode)
 static void
 logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 {
-	LogicalRepPartMapEntry *entry;
-
 	/* Just to be sure. */
 	if (LogicalRepPartMap == NULL)
 		return;
 
 	if (reloid != InvalidOid)
 	{
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
 		/* TODO, use inverse lookup hashtable? */
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 		{
 			if (entry->relmapentry.localreloid == reloid)
 			{
 				entry->relmapentry.localrelvalid = false;
-				hash_seq_term(&status);
+				foreach_hash_term(entry);
 				break;
 			}
 		}
@@ -557,11 +541,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 	else
 	{
 		/* invalidate all cache entries */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, LogicalRepPartMap);
-
-		while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LogicalRepPartMapEntry, entry, LogicalRepPartMap)
 			entry->relmapentry.localrelvalid = false;
 	}
 }
@@ -579,15 +559,12 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
 void
 logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
 {
-	HASH_SEQ_STATUS status;
-	LogicalRepPartMapEntry *part_entry;
 	LogicalRepRelMapEntry *entry;
 
 	if (LogicalRepPartMap == NULL)
 		return;
 
-	hash_seq_init(&status, LogicalRepPartMap);
-	while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LogicalRepPartMapEntry, part_entry, LogicalRepPartMap)
 	{
 		entry = &part_entry->relmapentry;
 
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 9fdb5d4d152..25bd0d8f088 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -5245,15 +5245,11 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 static void
 ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferToastEnt *ent;
-
 	if (txn->toast_hash == NULL)
 		return;
 
 	/* sequentially walk over the hash and free everything */
-	hash_seq_init(&hstat, txn->toast_hash);
-	while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferToastEnt, ent, txn->toast_hash)
 	{
 		dlist_mutable_iter it;
 
@@ -5316,11 +5312,7 @@ typedef struct RewriteMappingFile
 static void
 DisplayMapping(HTAB *tuplecid_data)
 {
-	HASH_SEQ_STATUS hstat;
-	ReorderBufferTupleCidEnt *ent;
-
-	hash_seq_init(&hstat, tuplecid_data);
-	while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ReorderBufferTupleCidEnt, ent, tuplecid_data)
 	{
 		elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
 			 ent->key.rlocator.dbOid,
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 6be2ae090cf..9d920fe0af6 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -2377,13 +2377,9 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
 static void
 cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 {
-	HASH_SEQ_STATUS hash_seq;
-	RelationSyncEntry *entry;
-
 	Assert(RelationSyncCache != NULL);
 
-	hash_seq_init(&hash_seq, RelationSyncCache);
-	while ((entry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		/*
 		 * We can set the schema_sent flag for an entry that has committed xid
@@ -2412,8 +2408,6 @@ cleanup_rel_sync_cache(TransactionId xid, bool is_commit)
 static void
 rel_sync_cache_relation_cb(Datum arg, Oid relid)
 {
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2436,18 +2430,16 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 		 * Getting invalidations for relations that aren't in the table is
 		 * entirely normal.  So we don't care if it's found or not.
 		 */
-		entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
-												  HASH_FIND, NULL);
+		RelationSyncEntry *entry = hash_search(RelationSyncCache, &relid,
+											   HASH_FIND, NULL);
+
 		if (entry != NULL)
 			entry->replicate_valid = false;
 	}
 	else
 	{
 		/* Whole cache must be flushed. */
-		HASH_SEQ_STATUS status;
-
-		hash_seq_init(&status, RelationSyncCache);
-		while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 		{
 			entry->replicate_valid = false;
 		}
@@ -2463,9 +2455,6 @@ static void
 rel_sync_cache_publication_cb(Datum arg, SysCacheIdentifier cacheid,
 							  uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	RelationSyncEntry *entry;
-
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
 	 * RelationSyncCache is destroyed when the decoding finishes, but there is
@@ -2478,8 +2467,7 @@ rel_sync_cache_publication_cb(Datum arg, SysCacheIdentifier cacheid,
 	 * We have no easy way to identify which cache entries this invalidation
 	 * event might have affected, so just mark them all invalid.
 	 */
-	hash_seq_init(&status, RelationSyncCache);
-	while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelationSyncEntry, entry, RelationSyncCache)
 	{
 		entry->replicate_valid = false;
 	}
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 65128b0f508..50aa8d11a1f 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -534,8 +534,6 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_SIZES_COLS 4
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Size		named_allocated = 0;
 	Datum		values[PG_GET_SHMEM_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_SIZES_COLS];
@@ -544,11 +542,9 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
 	memset(nulls, 0, sizeof(nulls));
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		values[0] = CStringGetTextDatum(ent->key);
 		values[1] = Int64GetDatum((char *) ent->location - (char *) ShmemSegHdr);
@@ -591,8 +587,6 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 {
 #define PG_GET_SHMEM_NUMA_SIZES_COLS 3
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hstat;
-	ShmemIndexEnt *ent;
 	Datum		values[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	bool		nulls[PG_GET_SHMEM_NUMA_SIZES_COLS];
 	Size		os_page_size;
@@ -642,10 +636,8 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
 
 	LWLockAcquire(ShmemIndexLock, LW_SHARED);
 
-	hash_seq_init(&hstat, ShmemIndex);
-
 	/* output all allocated entries */
-	while ((ent = (ShmemIndexEnt *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(ShmemIndexEnt, ent, ShmemIndex)
 	{
 		int			i;
 		char	   *startptr,
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 65264999c8a..1f16a8df5cc 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1101,13 +1101,9 @@ StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 void
 StandbyReleaseAllLocks(void)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
 	elog(DEBUG2, "release all standby locks");
 
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		StandbyReleaseXidEntryLocks(entry);
 		hash_search(RecoveryLockXidHash, entry, HASH_REMOVE, NULL);
@@ -1125,11 +1121,7 @@ StandbyReleaseAllLocks(void)
 void
 StandbyReleaseOldLocks(TransactionId oldxid)
 {
-	HASH_SEQ_STATUS status;
-	RecoveryLockXidEntry *entry;
-
-	hash_seq_init(&status, RecoveryLockXidHash);
-	while ((entry = hash_seq_search(&status)))
+	foreach_hash(RecoveryLockXidEntry, entry, RecoveryLockXidHash)
 	{
 		Assert(TransactionIdIsValid(entry->xid));
 
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 1d5ea7c051c..e5704cef90e 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -2304,11 +2304,9 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 void
 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 {
-	HASH_SEQ_STATUS status;
 	LockMethod	lockMethodTable;
 	int			i,
 				numLockModes;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	int			partition;
 	bool		have_fast_path_lwlock = false;
@@ -2341,9 +2339,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 	 * pointers.  Fast-path locks are cleaned up during the locallock table
 	 * scan, though.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/*
 		 * If the LOCALLOCK entry is unused, something must've gone wrong
@@ -2578,15 +2574,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 void
 LockReleaseSession(LOCKMETHODID lockmethodid)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		/* Ignore items that are not of the specified lock method */
 		if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
@@ -2610,12 +2601,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 {
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			ReleaseLockIfHeld(locallock, false);
 	}
 	else
@@ -2709,12 +2695,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 
 	if (locallocks == NULL)
 	{
-		HASH_SEQ_STATUS status;
-		LOCALLOCK  *locallock;
-
-		hash_seq_init(&status, LockMethodLocalHash);
-
-		while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+		foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 			LockReassignOwner(locallock, parent);
 	}
 	else
@@ -3393,17 +3374,13 @@ CheckForSessionAndXactLocks(void)
 	} PerLockTagEntry;
 
 	HTAB	   *lockhtab;
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 
 	/* Create a local hash table keyed by LOCKTAG only */
 	lockhtab = hash_make(PerLockTagEntry, lock,
 						 "CheckForSessionAndXactLocks table", 256);
 
 	/* Scan local lock table to find entries for each LOCKTAG */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		PerLockTagEntry *hentry;
@@ -3466,16 +3443,11 @@ CheckForSessionAndXactLocks(void)
 void
 AtPrepare_Locks(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
-
 	/* First, verify there aren't locks of both xact and session level */
 	CheckForSessionAndXactLocks();
 
 	/* Now do the per-locallock cleanup work */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		TwoPhaseLockRecord record;
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
@@ -3563,8 +3535,6 @@ void
 PostPrepare_Locks(FullTransactionId fxid)
 {
 	PGPROC	   *newproc = TwoPhaseGetDummyProc(fxid, false);
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	LOCK	   *lock;
 	PROCLOCK   *proclock;
 	PROCLOCKTAG proclocktag;
@@ -3586,9 +3556,7 @@ PostPrepare_Locks(FullTransactionId fxid)
 	 * pointing to the same proclock, and we daren't end up with any dangling
 	 * pointers.
 	 */
-	hash_seq_init(&status, LockMethodLocalHash);
-
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, LockMethodLocalHash)
 	{
 		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 		bool		haveSessionLock;
@@ -3784,8 +3752,6 @@ LockData *
 GetLockStatusData(void)
 {
 	LockData   *data;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			els;
 	int			el;
 	int			i;
@@ -3921,9 +3887,7 @@ GetLockStatusData(void)
 	}
 
 	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PGPROC	   *proc = proclock->tag.myProc;
 		LOCK	   *lock = proclock->tag.myLock;
@@ -4161,8 +4125,6 @@ xl_standby_lock *
 GetRunningTransactionLocks(int *nlocks)
 {
 	xl_standby_lock *accessExclusiveLocks;
-	PROCLOCK   *proclock;
-	HASH_SEQ_STATUS seqstat;
 	int			i;
 	int			index;
 	int			els;
@@ -4184,10 +4146,9 @@ GetRunningTransactionLocks(int *nlocks)
 	 */
 	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
 
-	/* Now scan the tables to copy the data */
-	hash_seq_init(&seqstat, LockMethodProcLockHash);
-
 	/*
+	 * Now scan the tables to copy the data.
+	 *
 	 * If lock is a currently granted AccessExclusiveLock then it will have
 	 * just one proclock holder, so locks are never accessed twice in this
 	 * particular case. Don't copy this code for use elsewhere because in the
@@ -4195,7 +4156,7 @@ GetRunningTransactionLocks(int *nlocks)
 	 * non-exclusive lock types.
 	 */
 	index = 0;
-	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		/* make sure this definition matches the one used in LockAcquire */
 		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
@@ -4290,18 +4251,14 @@ void
 DumpAllLocks(void)
 {
 	PGPROC	   *proc;
-	PROCLOCK   *proclock;
 	LOCK	   *lock;
-	HASH_SEQ_STATUS status;
 
 	proc = MyProc;
 
 	if (proc && proc->waitLock)
 		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
 
-	hash_seq_init(&status, LockMethodProcLockHash);
-
-	while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PROCLOCK, proclock, LockMethodProcLockHash)
 	{
 		PROCLOCK_PRINT("DumpAllLocks", proclock);
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 8fea811ecbe..d3508e0b90e 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -325,15 +325,10 @@ init_lwlock_stats(void)
 static void
 print_lwlock_stats(int code, Datum arg)
 {
-	HASH_SEQ_STATUS scan;
-	lwlock_stats *lwstats;
-
-	hash_seq_init(&scan, lwlock_stats_htab);
-
 	/* Grab an LWLock to keep different backends from mixing reports */
 	LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
 
-	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
+	foreach_hash(lwlock_stats, lwstats, lwlock_stats_htab)
 	{
 		fprintf(stderr,
 				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 447b1e39804..b385ca0d45e 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1453,8 +1453,6 @@ GetPredicateLockStatusData(void)
 	int			i;
 	int			els,
 				el;
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCK *predlock;
 
 	data = palloc_object(PredicateLockData);
 
@@ -1474,11 +1472,9 @@ GetPredicateLockStatusData(void)
 
 
 	/* Scan through PredicateLockHash and copy contents */
-	hash_seq_init(&seqstat, PredicateLockHash);
-
 	el = 0;
 
-	while ((predlock = (PREDICATELOCK *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCK, predlock, PredicateLockHash)
 	{
 		data->locktags[el] = predlock->tag.myTarget->tag;
 		data->xacts[el] = *predlock->tag.myXact;
@@ -2935,8 +2931,6 @@ exit:
 static void
 DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *oldtarget;
 	PREDICATELOCKTARGET *heaptarget;
 	Oid			dbId;
 	Oid			relId;
@@ -2992,9 +2986,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 		RemoveScratchTarget(true);
 
 	/* Scan through target map */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, oldtarget, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
@@ -4417,8 +4409,6 @@ CheckForSerializableConflictIn(Relation relation, const ItemPointerData *tid, Bl
 void
 CheckTableForSerializableConflictIn(Relation relation)
 {
-	HASH_SEQ_STATUS seqstat;
-	PREDICATELOCKTARGET *target;
 	Oid			dbId;
 	Oid			heapId;
 	int			i;
@@ -4452,9 +4442,7 @@ CheckTableForSerializableConflictIn(Relation relation)
 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
 
 	/* Scan through target list */
-	hash_seq_init(&seqstat, PredicateLockTargetHash);
-
-	while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+	foreach_hash(PREDICATELOCKTARGET, target, PredicateLockTargetHash)
 	{
 		dlist_mutable_iter iter;
 
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index e48b383b466..d1ad8b5e114 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -409,9 +409,6 @@ smgrdestroyall(void)
 void
 smgrreleaseall(void)
 {
-	HASH_SEQ_STATUS status;
-	SMgrRelation reln;
-
 	/* Nothing to do if hashtable not set up */
 	if (SMgrRelationHash == NULL)
 		return;
@@ -419,9 +416,7 @@ smgrreleaseall(void)
 	/* seems unsafe to accept interrupts while iterating */
 	HOLD_INTERRUPTS();
 
-	hash_seq_init(&status, SMgrRelationHash);
-
-	while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL)
+	foreach_hash(SMgrRelationData, reln, SMgrRelationHash)
 	{
 		smgrrelease(reln);
 	}
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index b8f3136e31d..afca80d2cff 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -281,8 +281,6 @@ ProcessSyncRequests(void)
 {
 	static bool sync_in_progress = false;
 
-	HASH_SEQ_STATUS hstat;
-	PendingFsyncEntry *entry;
 	int			absorb_counter;
 
 	/* Statistics on sync times */
@@ -339,8 +337,7 @@ ProcessSyncRequests(void)
 	if (sync_in_progress)
 	{
 		/* prior try failed, so update any stale cycle_ctr values */
-		hash_seq_init(&hstat, pendingOps);
-		while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, entry, pendingOps)
 		{
 			entry->cycle_ctr = sync_cycle_ctr;
 		}
@@ -354,8 +351,7 @@ ProcessSyncRequests(void)
 
 	/* Now scan the hashtable for fsync requests to process */
 	absorb_counter = FSYNCS_PER_ABSORB;
-	hash_seq_init(&hstat, pendingOps);
-	while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+	foreach_hash(PendingFsyncEntry, entry, pendingOps)
 	{
 		int			failures;
 
@@ -381,8 +377,8 @@ ProcessSyncRequests(void)
 			 * If in checkpointer, we want to absorb pending requests every so
 			 * often to prevent overflow of the fsync request queue.  It is
 			 * unspecified whether newly-added entries will be visited by
-			 * hash_seq_search, but we don't care since we don't need to
-			 * process them anyway.
+			 * foreach_hash, but we don't care since we don't need to process
+			 * them anyway.
 			 */
 			if (--absorb_counter <= 0)
 			{
@@ -496,13 +492,10 @@ RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
 	}
 	else if (type == SYNC_FILTER_REQUEST)
 	{
-		HASH_SEQ_STATUS hstat;
-		PendingFsyncEntry *pfe;
 		ListCell   *cell;
 
 		/* Cancel matching fsync requests */
-		hash_seq_init(&hstat, pendingOps);
-		while ((pfe = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
+		foreach_hash(PendingFsyncEntry, pfe, pendingOps)
 		{
 			if (pfe->tag.handler == ftag->handler &&
 				syncsw[ftag->handler].sync_filetagmatches(ftag, &pfe->tag))
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 49ec4ab4db5..1d04f5fd310 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -149,7 +149,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *lexemes_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -288,7 +287,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 		int			nonnull_cnt = samplerows - null_cnt;
 		int			i;
 		TrackItem **sort_table;
-		TrackItem  *item;
 		int			track_len;
 		int			cutoff_freq;
 		int			minfreq,
@@ -315,10 +313,9 @@ compute_tsvector_stats(VacAttrStats *stats,
 		i = hash_get_num_entries(lexemes_tab);	/* surely enough space */
 		sort_table = palloc_array(TrackItem *, i);
 
-		hash_seq_init(&scan_status, lexemes_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, lexemes_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -462,11 +459,7 @@ compute_tsvector_stats(VacAttrStats *stats,
 static void
 prune_lexemes_hashtable(HTAB *lexemes_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, lexemes_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, lexemes_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index 3cf2d029d0c..c5db7582636 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -300,8 +300,6 @@ char	  **
 GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 {
 	char	  **waiteventnames;
-	WaitEventCustomEntryByName *hentry;
-	HASH_SEQ_STATUS hash_seq;
 	int			index;
 	int			els;
 
@@ -314,10 +312,8 @@ GetWaitEventCustomNames(uint32 classId, int *nwaitevents)
 	waiteventnames = palloc_array(char *, els);
 
 	/* Now scan the hash table to copy the data */
-	hash_seq_init(&hash_seq, WaitEventCustomHashByName);
-
 	index = 0;
-	while ((hentry = (WaitEventCustomEntryByName *) hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(WaitEventCustomEntryByName, hentry, WaitEventCustomHashByName)
 	{
 		if ((hentry->wait_event_info & WAIT_EVENT_CLASS_MASK) != classId)
 			continue;
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index bdc7e2237f6..281f6800310 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -223,7 +223,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
 	/* This is D from the LC algorithm. */
 	HTAB	   *elements_tab;
-	HASH_SEQ_STATUS scan_status;
 
 	/* This is the current bucket number from the LC algorithm */
 	int			b_current;
@@ -232,10 +231,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	int			bucket_width;
 	int			array_no;
 	int64		element_no;
-	TrackItem  *item;
 	int			slot_idx;
 	HTAB	   *count_tab;
-	DECountItem *count_item;
 
 	extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;
 
@@ -300,6 +297,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		int64		prev_element_no = element_no;
 		int			distinct_count;
 		bool		count_item_found;
+		DECountItem *count_item;
 
 		vacuum_delay_point(true);
 
@@ -338,6 +336,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		{
 			Datum		elem_value;
 			bool		found;
+			TrackItem  *item;
 
 			/* No null element processing other than flag setting here */
 			if (elem_nulls[j])
@@ -458,10 +457,9 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 		i = hash_get_num_entries(elements_tab); /* surely enough space */
 		sort_table = palloc_array(TrackItem *, i);
 
-		hash_seq_init(&scan_status, elements_tab);
 		track_len = 0;
 		maxfreq = 0;
-		while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+		foreach_hash(TrackItem, item, elements_tab)
 		{
 			if (item->frequency > cutoff_freq)
 			{
@@ -594,9 +592,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 			 * increasing count order.
 			 */
 			sorted_count_items = palloc_array(DECountItem *, count_items_count);
-			hash_seq_init(&scan_status, count_tab);
 			j = 0;
-			while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
+			foreach_hash(DECountItem, count_item, count_tab)
 			{
 				sorted_count_items[j++] = count_item;
 			}
@@ -683,11 +680,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 static void
 prune_element_hashtable(HTAB *elements_tab, int b_current)
 {
-	HASH_SEQ_STATUS scan_status;
-	TrackItem  *item;
-
-	hash_seq_init(&scan_status, elements_tab);
-	while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
+	foreach_hash(TrackItem, item, elements_tab)
 	{
 		if (item->frequency + item->delta <= b_current)
 		{
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 79a291324c0..53c616fbe17 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -177,7 +177,7 @@ static int	in_progress_list_maxlen;
  * eoxact_list[] stores the OIDs of relations that (might) need AtEOXact
  * cleanup work.  This list intentionally has limited size; if it overflows,
  * we fall back to scanning the whole hashtable.  There is no value in a very
- * large list because (1) at some point, a hash_seq_search scan is faster than
+ * large list because (1) at some point, a foreach_hash scan is faster than
  * retail lookups, and (2) the value of this is to reduce EOXact work for
  * short transactions, which can't have dirtied all that many tables anyway.
  * EOXactListAdd() does not bother to prevent duplicate list entries, so the
@@ -2957,13 +2957,13 @@ RelationCacheInvalidateEntry(Oid relationId)
  *
  *	 We do this in two phases: the first pass deletes deletable items, and
  *	 the second one rebuilds the rebuildable items.  This is essential for
- *	 safety, because hash_seq_search only copes with concurrent deletion of
+ *	 safety, because foreach_hash only copes with concurrent deletion of
  *	 the element it is currently visiting.  If a second SI overflow were to
  *	 occur while we are walking the table, resulting in recursive entry to
  *	 this routine, we could crash because the inner invocation blows away
  *	 the entry next to be visited by the outer scan.  But this way is OK,
  *	 because (a) during the first pass we won't process any more SI messages,
- *	 so hash_seq_search will complete safely; (b) during the second pass we
+ *	 so foreach_hash will complete safely; (b) during the second pass we
  *	 only hold onto pointers to nondeletable entries.
  *
  *	 The two-phase approach also makes it easy to update relfilenumbers for
@@ -2980,8 +2980,6 @@ RelationCacheInvalidateEntry(Oid relationId)
 void
 RelationCacheInvalidate(bool debug_discard)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	Relation	relation;
 	List	   *rebuildFirstList = NIL;
 	List	   *rebuildList = NIL;
@@ -2994,9 +2992,7 @@ RelationCacheInvalidate(bool debug_discard)
 	RelationMapInvalidateAll();
 
 	/* Phase 1 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		relation = idhentry->reldesc;
 
@@ -3141,12 +3137,9 @@ AssertPendingSyncConsistency(Relation relation)
 void
 AssertPendingSyncs_RelationCache(void)
 {
-	HASH_SEQ_STATUS status;
-	LOCALLOCK  *locallock;
 	Relation   *rels;
 	int			maxrels;
 	int			nrels;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3160,8 +3153,7 @@ AssertPendingSyncs_RelationCache(void)
 	maxrels = 1;
 	rels = palloc(maxrels * sizeof(*rels));
 	nrels = 0;
-	hash_seq_init(&status, GetLockMethodLocalHash());
-	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
+	foreach_hash(LOCALLOCK, locallock, GetLockMethodLocalHash())
 	{
 		Oid			relid;
 		Relation	r;
@@ -3183,8 +3175,7 @@ AssertPendingSyncs_RelationCache(void)
 		rels[nrels++] = r;
 	}
 
-	hash_seq_init(&status, RelationIdCache);
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		AssertPendingSyncConsistency(idhentry->reldesc);
 
 	for (i = 0; i < nrels; i++)
@@ -3212,8 +3203,6 @@ AssertPendingSyncs_RelationCache(void)
 void
 AtEOXact_RelationCache(bool isCommit)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3225,7 +3214,7 @@ AtEOXact_RelationCache(bool isCommit)
 
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
-	 * listed in it.  Otherwise fall back on a hash_seq_search scan.
+	 * listed in it.  Otherwise fall back on a foreach_hash scan.
 	 *
 	 * For simplicity, eoxact_list[] entries are not deleted till end of
 	 * top-level transaction, even though we could remove them at
@@ -3236,8 +3225,7 @@ AtEOXact_RelationCache(bool isCommit)
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3246,10 +3234,11 @@ AtEOXact_RelationCache(bool isCommit)
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
-			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
-													 &eoxact_list[i],
-													 HASH_FIND,
-													 NULL);
+			RelIdCacheEnt *idhentry = hash_search(RelationIdCache,
+												  &eoxact_list[i],
+												  HASH_FIND,
+												  NULL);
+
 			if (idhentry != NULL)
 				AtEOXact_cleanup(idhentry->reldesc, isCommit);
 		}
@@ -3365,8 +3354,6 @@ void
 AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 						  SubTransactionId parentSubid)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -3379,13 +3366,12 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 
 	/*
 	 * Unless the eoxact_list[] overflowed, we only need to examine the rels
-	 * listed in it.  Otherwise fall back on a hash_seq_search scan.  Same
-	 * logic as in AtEOXact_RelationCache.
+	 * listed in it.  Otherwise fall back on a foreach_hash scan.  Same logic
+	 * as in AtEOXact_RelationCache.
 	 */
 	if (eoxact_list_overflowed)
 	{
-		hash_seq_init(&status, RelationIdCache);
-		while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+		foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 		{
 			AtEOSubXact_cleanup(idhentry->reldesc, isCommit,
 								mySubid, parentSubid);
@@ -3395,6 +3381,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
 	{
 		for (i = 0; i < eoxact_list_len; i++)
 		{
+			RelIdCacheEnt *idhentry;
+
 			idhentry = (RelIdCacheEnt *) hash_search(RelationIdCache,
 													 &eoxact_list[i],
 													 HASH_FIND,
@@ -4093,8 +4081,6 @@ RelationCacheInitializePhase2(void)
 void
 RelationCacheInitializePhase3(void)
 {
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	MemoryContext oldcxt;
 	bool		needNewCacheFile = !criticalSharedRelcachesBuilt;
 
@@ -4225,15 +4211,13 @@ RelationCacheInitializePhase3(void)
 	 *
 	 * Whenever we access the catalogs to read data, there is a possibility of
 	 * a shared-inval cache flush causing relcache entries to be removed.
-	 * Since hash_seq_search only guarantees to still work after the *current*
+	 * Since foreach_hash only guarantees to still work after the *current*
 	 * entry is removed, it's unsafe to continue the hashtable scan afterward.
 	 * We handle this by restarting the scan from scratch after each access.
 	 * This is theoretically O(N^2), but the number of entries that actually
 	 * need to be fixed is small enough that it doesn't matter.
 	 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		Relation	relation = idhentry->reldesc;
 		bool		restart = false;
@@ -4343,10 +4327,7 @@ RelationCacheInitializePhase3(void)
 
 		/* Now, restart the hashtable scan if needed */
 		if (restart)
-		{
-			hash_seq_term(&status);
-			hash_seq_init(&status, RelationIdCache);
-		}
+			foreach_hash_restart(idhentry, RelationIdCache);
 	}
 
 	/*
@@ -6599,8 +6580,6 @@ write_relcache_init_file(bool shared)
 	char		tempfilename[MAXPGPATH];
 	char		finalfilename[MAXPGPATH];
 	int			magic;
-	HASH_SEQ_STATUS status;
-	RelIdCacheEnt *idhentry;
 	int			i;
 
 	/*
@@ -6660,9 +6639,7 @@ write_relcache_init_file(bool shared)
 	/*
 	 * Write all the appropriate reldescs (in no particular order).
 	 */
-	hash_seq_init(&status, RelationIdCache);
-
-	while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelIdCacheEnt, idhentry, RelationIdCache)
 	{
 		Relation	rel = idhentry->reldesc;
 		Form_pg_class relform = rel->rd_rel;
diff --git a/src/backend/utils/cache/relfilenumbermap.c b/src/backend/utils/cache/relfilenumbermap.c
index 09b05dec9d0..00df7db4811 100644
--- a/src/backend/utils/cache/relfilenumbermap.c
+++ b/src/backend/utils/cache/relfilenumbermap.c
@@ -51,14 +51,10 @@ typedef struct
 static void
 RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
 {
-	HASH_SEQ_STATUS status;
-	RelfilenumberMapEntry *entry;
-
 	/* callback only gets registered after creating the hash */
 	Assert(RelfilenumberMapHash != NULL);
 
-	hash_seq_init(&status, RelfilenumberMapHash);
-	while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(RelfilenumberMapEntry, entry, RelfilenumberMapHash)
 	{
 		/*
 		 * If relid is InvalidOid, signaling a complete reset, we must remove
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index acff62a38be..088c0f69cca 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -56,11 +56,7 @@ static void
 InvalidateTableSpaceCacheCallback(Datum arg, SysCacheIdentifier cacheid,
 								  uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	TableSpaceCacheEntry *spc;
-
-	hash_seq_init(&status, TableSpaceCacheHash);
-	while ((spc = (TableSpaceCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TableSpaceCacheEntry, spc, TableSpaceCacheHash)
 	{
 		if (spc->opts)
 			pfree(spc->opts);
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 71e57076002..a8c808f87ed 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -95,11 +95,8 @@ static void
 InvalidateTSCacheCallBack(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 {
 	HTAB	   *hash = (HTAB *) DatumGetPointer(arg);
-	HASH_SEQ_STATUS status;
-	TSAnyCacheEntry *entry;
 
-	hash_seq_init(&status, hash);
-	while ((entry = (TSAnyCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TSAnyCacheEntry, entry, hash)
 		entry->isvalid = false;
 
 	/* Also invalidate the current-config cache if it's pg_ts_config */
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 5e6650404f4..666d42bad4a 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -2435,14 +2435,13 @@ InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
 static void
 TypeCacheRelCallback(Datum arg, Oid relid)
 {
-	TypeCacheEntry *typentry;
-
 	/*
 	 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
 	 * callback wouldn't be registered
 	 */
 	if (OidIsValid(relid))
 	{
+		TypeCacheEntry *typentry;
 		RelIdToTypeIdCacheEntry *relentry;
 
 		/*
@@ -2490,15 +2489,12 @@ TypeCacheRelCallback(Datum arg, Oid relid)
 	}
 	else
 	{
-		HASH_SEQ_STATUS status;
-
 		/*
 		 * Relid is invalid. By convention, we need to reset all composite
 		 * types in cache. Also, we should reset flags for domain types, and
 		 * we loop over all entries in hash, so, do it in a single scan.
 		 */
-		hash_seq_init(&status, TypeCacheHash);
-		while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
+		foreach_hash(TypeCacheEntry, typentry, TypeCacheHash)
 		{
 			if (typentry->typtype == TYPTYPE_COMPOSITE)
 			{
@@ -2588,12 +2584,8 @@ TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 static void
 TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
 {
-	HASH_SEQ_STATUS status;
-	TypeCacheEntry *typentry;
-
 	/* TypeCacheHash must exist, else this callback wouldn't be registered */
-	hash_seq_init(&status, TypeCacheHash);
-	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(TypeCacheEntry, typentry, TypeCacheHash)
 	{
 		bool		hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
 
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index cd99d72e8a8..d5c513d0bd0 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -289,8 +289,6 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	const char *ConfFileWithError;
 	ConfigVariable *head,
 			   *tail;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 
 	/* Parse the main config file into a list of option names and values */
 	ConfFileWithError = ConfigFileName;
@@ -365,8 +363,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * need this so that we can tell below which ones have been removed from
 	 * the file since we last processed it.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -450,8 +447,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel)
 	 * boot-time defaults.  If such a variable can't be changed after startup,
 	 * report that and continue.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *gconf = hentry->gucvar;
 
@@ -840,8 +836,6 @@ struct config_generic **
 get_guc_variables(int *num_vars)
 {
 	struct config_generic **result;
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	int			i;
 
 	*num_vars = hash_get_num_entries(guc_hashtab);
@@ -849,8 +843,7 @@ get_guc_variables(int *num_vars)
 
 	/* Extract pointers from the hash table */
 	i = 0;
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 		result[i++] = hentry->gucvar;
 	Assert(i == *num_vars);
 
@@ -1401,9 +1394,6 @@ check_GUC_init(const struct config_generic *gconf)
 void
 InitializeGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Before log_line_prefix could possibly receive a nonempty setting, make
 	 * sure that timezone processing is minimally alive (see elog.c).
@@ -1419,8 +1409,7 @@ InitializeGUCOptions(void)
 	 * Load all variables with their compiled-in defaults, and initialize
 	 * status fields as needed.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		/* Check mapping between initial and default value */
 		Assert(check_GUC_init(hentry->gucvar));
@@ -2446,9 +2435,6 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
 void
 BeginReportingGUCOptions(void)
 {
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
-
 	/*
 	 * Don't do anything unless talking to an interactive frontend.
 	 */
@@ -2470,8 +2456,7 @@ BeginReportingGUCOptions(void)
 						PGC_INTERNAL, PGC_S_OVERRIDE);
 
 	/* Transmit initial values of interesting variables */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *conf = hentry->gucvar;
 
@@ -5174,16 +5159,13 @@ void
 MarkGUCPrefixReserved(const char *className)
 {
 	int			classLen = strlen(className);
-	HASH_SEQ_STATUS status;
-	GUCHashEntry *hentry;
 	MemoryContext oldcontext;
 
 	/*
 	 * Check for existing placeholders.  We must actually remove invalid
 	 * placeholders, else future parallel worker startups will fail.
 	 */
-	hash_seq_init(&status, guc_hashtab);
-	while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
+	foreach_hash(GUCHashEntry, hentry, guc_hashtab)
 	{
 		struct config_generic *var = hentry->gucvar;
 
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index f8a6db5316a..92a98d00f5b 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -35,7 +35,7 @@
  * used in initially sizing the PortalHashTable in EnablePortalManager().
  * Since the hash table can expand, there's no need to make this overly
  * generous, and keeping it small avoids unnecessary overhead in the
- * hash_seq_search() calls executed during transaction end.
+ * foreach_hash loops executed during transaction end.
  */
 #define PORTALS_PER_USER	   16
 
@@ -603,14 +603,10 @@ PortalDrop(Portal portal, bool isTopCommit)
 void
 PortalHashTableDeleteAll(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
 	if (PortalHashTable == NULL)
 		return;
 
-	hash_seq_init(&status, PortalHashTable);
-	while ((hentry = hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -621,8 +617,7 @@ PortalHashTableDeleteAll(void)
 		PortalDrop(portal, false);
 
 		/* Restart the iteration in case that led to other drops */
-		hash_seq_term(&status);
-		hash_seq_init(&status, PortalHashTable);
+		foreach_hash_restart(hentry, PortalHashTable);
 	}
 }
 
@@ -674,12 +669,8 @@ bool
 PreCommit_Portals(bool isPrepare)
 {
 	bool		result = false;
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
 
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -761,8 +752,7 @@ PreCommit_Portals(bool isPrepare)
 		 * iteration, because we could have invoked user-defined code that
 		 * caused a drop of the next portal in the hash chain.
 		 */
-		hash_seq_term(&status);
-		hash_seq_init(&status, PortalHashTable);
+		foreach_hash_restart(hentry, PortalHashTable);
 	}
 
 	return result;
@@ -777,12 +767,7 @@ PreCommit_Portals(bool isPrepare)
 void
 AtAbort_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -855,12 +840,7 @@ AtAbort_Portals(void)
 void
 AtCleanup_Portals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -914,12 +894,7 @@ AtCleanup_Portals(void)
 void
 PortalErrorCleanup(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -943,12 +918,7 @@ AtSubCommit_Portals(SubTransactionId mySubid,
 					int parentLevel,
 					ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -979,12 +949,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 				   ResourceOwner myXactOwner,
 				   ResourceOwner parentXactOwner)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1089,12 +1054,7 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 void
 AtSubCleanup_Portals(SubTransactionId mySubid)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1129,8 +1089,6 @@ Datum
 pg_cursor(PG_FUNCTION_ARGS)
 {
 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	HASH_SEQ_STATUS hash_seq;
-	PortalHashEnt *hentry;
 
 	/*
 	 * We put all the tuples into a tuplestore in one scan of the hashtable.
@@ -1138,8 +1096,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 	 */
 	InitMaterializedSRF(fcinfo, 0);
 
-	hash_seq_init(&hash_seq, PortalHashTable);
-	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 		Datum		values[6];
@@ -1168,12 +1125,7 @@ pg_cursor(PG_FUNCTION_ARGS)
 bool
 ThereAreNoReadyPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1204,12 +1156,7 @@ ThereAreNoReadyPortals(void)
 void
 HoldPinnedPortals(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
-
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
@@ -1253,15 +1200,11 @@ HoldPinnedPortals(void)
 void
 ForgetPortalSnapshots(void)
 {
-	HASH_SEQ_STATUS status;
-	PortalHashEnt *hentry;
 	int			numPortalSnaps = 0;
 	int			numActiveSnaps = 0;
 
 	/* First, scan PortalHashTable and clear portalSnapshot fields */
-	hash_seq_init(&status, PortalHashTable);
-
-	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	foreach_hash(PortalHashEnt, hentry, PortalHashTable)
 	{
 		Portal		portal = hentry->portal;
 
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index bc5bd5fa051..8ffee6368fc 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -504,9 +504,6 @@ set_interp_require(bool trusted)
 static void
 plperl_fini(int code, Datum arg)
 {
-	HASH_SEQ_STATUS hash_seq;
-	plperl_interp_desc *interp_desc;
-
 	elog(DEBUG3, "plperl_fini");
 
 	/*
@@ -528,8 +525,7 @@ plperl_fini(int code, Datum arg)
 	plperl_destroy_interp(&plperl_held_interp);
 
 	/* Zap any fully-initialized interpreters */
-	hash_seq_init(&hash_seq, plperl_interp_hash);
-	while ((interp_desc = hash_seq_search(&hash_seq)) != NULL)
+	foreach_hash(plperl_interp_desc, interp_desc, plperl_interp_hash)
 	{
 		if (interp_desc->interp)
 		{
-- 
2.53.0

