From fe85fd375c89a85a84e47a88f01a9d629a8abb09 Mon Sep 17 00:00:00 2001
From: Bertrand Drouvot <bertranddrouvot.pg@gmail.com>
Date: Tue, 6 Jan 2026 11:06:31 +0000
Subject: [PATCH v4 3/4] Remove useless calls to flush some stats

Now that some stats can be flushed outside of transaction boundaries, remove
the now-useless calls that explicitly report/flush them. Those calls were in
place because, before commit <XXXX>, stats were flushed only at transaction
boundaries.

Note that:

- this reverts 039549d70f6 (only its tests are kept)
- this can't be done for the checkpointer or bgworkers, for example, because
  they don't have a flush callback to call
- this can't be done for auxiliary processes (the walsummarizer, for example)
  because they currently do not register the new timeout handler
---
 src/backend/replication/walreceiver.c        | 10 ------
 src/backend/replication/walsender.c          | 36 ++------------------
 src/backend/utils/activity/pgstat_relation.c | 13 -------
 src/test/recovery/t/001_stream_rep.pl        |  1 +
 src/test/subscription/t/001_rep_changes.pl   |  1 +
 5 files changed, 4 insertions(+), 57 deletions(-)
  69.9% src/backend/replication/
  22.8% src/backend/utils/activity/
   3.5% src/test/recovery/t/
   3.6% src/test/subscription/t/
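
For reference, the pattern the walsender hunks below remove is a rate-limited
flush driven by the send loops themselves. A minimal sketch of that removed
code, reusing only identifiers visible in the hunks:

	/*
	 * Flush IO statistics at most once per WALSENDER_STATS_FLUSH_INTERVAL
	 * (1000 ms), using the loop's own clock.
	 */
	now = GetCurrentTimestamp();

	if (TimestampDifferenceExceeds(last_flush, now,
								   WALSENDER_STATS_FLUSH_INTERVAL))
	{
		pgstat_flush_io(false);
		(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
		last_flush = now;
	}

With the timeout-based flushing introduced earlier in this series (exercised
in the tests via stats_flush_interval), these per-loop flushes are no longer
needed.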

diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 6970af3f3ff..dcbe3517b46 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -565,16 +565,6 @@ WalReceiverMain(const void *startup_data, size_t startup_data_len)
 					 */
 					bool		requestReply = false;
 
-					/*
-					 * Report pending statistics to the cumulative stats
-					 * system.  This location is useful for the report as it
-					 * is not within a tight loop in the WAL receiver, to
-					 * avoid bloating pgstats with requests, while also making
-					 * sure that the reports happen each time a status update
-					 * is sent.
-					 */
-					pgstat_report_wal(false);
-
 					/*
 					 * Check if time since last receive from primary has
 					 * reached the configured limit.
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index a0e6a3d200c..74102def9c7 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -94,14 +94,10 @@
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/pg_lsn.h"
-#include "utils/pgstat_internal.h"
 #include "utils/ps_status.h"
 #include "utils/timeout.h"
 #include "utils/timestamp.h"
 
-/* Minimum interval used by walsender for stats flushes, in ms */
-#define WALSENDER_STATS_FLUSH_INTERVAL         1000
-
 /*
  * Maximum data payload in a WAL data message.  Must be >= XLOG_BLCKSZ.
  *
@@ -1825,7 +1821,6 @@ WalSndWaitForWal(XLogRecPtr loc)
 	int			wakeEvents;
 	uint32		wait_event = 0;
 	static XLogRecPtr RecentFlushPtr = InvalidXLogRecPtr;
-	TimestampTz last_flush = 0;
 
 	/*
 	 * Fast path to avoid acquiring the spinlock in case we already know we
@@ -1846,7 +1841,6 @@ WalSndWaitForWal(XLogRecPtr loc)
 	{
 		bool		wait_for_standby_at_stop = false;
 		long		sleeptime;
-		TimestampTz now;
 
 		/* Clear any already-pending wakeups */
 		ResetLatch(MyLatch);
@@ -1957,8 +1951,7 @@ WalSndWaitForWal(XLogRecPtr loc)
 		 * new WAL to be generated.  (But if we have nothing to send, we don't
 		 * want to wake on socket-writable.)
 		 */
-		now = GetCurrentTimestamp();
-		sleeptime = WalSndComputeSleeptime(now);
+		sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
 
 		wakeEvents = WL_SOCKET_READABLE;
 
@@ -1967,15 +1960,6 @@ WalSndWaitForWal(XLogRecPtr loc)
 
 		Assert(wait_event != 0);
 
-		/* Report IO statistics, if needed */
-		if (TimestampDifferenceExceeds(last_flush, now,
-									   WALSENDER_STATS_FLUSH_INTERVAL))
-		{
-			pgstat_flush_io(false);
-			(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
-			last_flush = now;
-		}
-
 		WalSndWait(wakeEvents, sleeptime, wait_event);
 	}
 
@@ -2878,8 +2862,6 @@ WalSndCheckTimeOut(void)
 static void
 WalSndLoop(WalSndSendDataCallback send_data)
 {
-	TimestampTz last_flush = 0;
-
 	/*
 	 * Initialize the last reply timestamp. That enables timeout processing
 	 * from hereon.
@@ -2974,9 +2956,6 @@ WalSndLoop(WalSndSendDataCallback send_data)
 		 * WalSndWaitForWal() handle any other blocking; idle receivers need
 		 * its additional actions.  For physical replication, also block if
 		 * caught up; its send_data does not block.
-		 *
-		 * The IO statistics are reported in WalSndWaitForWal() for the
-		 * logical WAL senders.
 		 */
 		if ((WalSndCaughtUp && send_data != XLogSendLogical &&
 			 !streamingDoneSending) ||
@@ -2984,7 +2963,6 @@ WalSndLoop(WalSndSendDataCallback send_data)
 		{
 			long		sleeptime;
 			int			wakeEvents;
-			TimestampTz now;
 
 			if (!streamingDoneReceiving)
 				wakeEvents = WL_SOCKET_READABLE;
@@ -2995,21 +2973,11 @@ WalSndLoop(WalSndSendDataCallback send_data)
 			 * Use fresh timestamp, not last_processing, to reduce the chance
 			 * of reaching wal_sender_timeout before sending a keepalive.
 			 */
-			now = GetCurrentTimestamp();
-			sleeptime = WalSndComputeSleeptime(now);
+			sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
 
 			if (pq_is_send_pending())
 				wakeEvents |= WL_SOCKET_WRITEABLE;
 
-			/* Report IO statistics, if needed */
-			if (TimestampDifferenceExceeds(last_flush, now,
-										   WALSENDER_STATS_FLUSH_INTERVAL))
-			{
-				pgstat_flush_io(false);
-				(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
-				last_flush = now;
-			}
-
 			/* Sleep until something happens or we time out */
 			WalSndWait(wakeEvents, sleeptime, WAIT_EVENT_WAL_SENDER_MAIN);
 		}
diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c
index bc8c43b96aa..feae2ae5f44 100644
--- a/src/backend/utils/activity/pgstat_relation.c
+++ b/src/backend/utils/activity/pgstat_relation.c
@@ -260,15 +260,6 @@ pgstat_report_vacuum(Relation rel, PgStat_Counter livetuples,
 	}
 
 	pgstat_unlock_entry(entry_ref);
-
-	/*
-	 * Flush IO statistics now. pgstat_report_stat() will flush IO stats,
-	 * however this will not be called until after an entire autovacuum cycle
-	 * is done -- which will likely vacuum many relations -- or until the
-	 * VACUUM command has processed all tables and committed.
-	 */
-	pgstat_flush_io(false);
-	(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
 }
 
 /*
@@ -360,10 +351,6 @@ pgstat_report_analyze(Relation rel,
 	}
 
 	pgstat_unlock_entry(entry_ref);
-
-	/* see pgstat_report_vacuum() */
-	pgstat_flush_io(false);
-	(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
 }
 
 /*
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index e9ac67813c7..c058a5f9b1f 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -15,6 +15,7 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary');
 $node_primary->init(
 	allows_streaming => 1,
 	auth_extra => [ '--create-role' => 'repl_role' ]);
+$node_primary->append_conf('postgresql.conf', "stats_flush_interval = '1s'");
 $node_primary->start;
 my $backup_name = 'my_backup';
 
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index d7e62e4d488..dda872f7074 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -11,6 +11,7 @@ use Test::More;
 # Initialize publisher node
 my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf('postgresql.conf', "stats_flush_interval = '1s'");
 $node_publisher->start;
 
 # Create subscriber node
-- 
2.34.1

