From 5a57a7034acf37b11f9020e415c801963270bd33 Mon Sep 17 00:00:00 2001
From: Bertrand Drouvot <bertranddrouvot.pg@gmail.com>
Date: Mon, 19 Jan 2026 06:27:55 +0000
Subject: [PATCH v6 5/5] Change RELATION and DATABASE stats to anytime flush

This commit allows mixing fields with different transaction behavior within
the same RELATION or DATABASE statistics kind: some fields are transactional
(e.g., tuple inserts/updates/deletes) while others are non-transactional
(e.g., sequential scans, blocks read).

It modifies the relation flush callback to handle the anytime_only parameter
introduced in commit <nnnn>.

Implementation details:

- Change RELATION from FLUSH_AT_TXN_BOUNDARY to FLUSH_ANYTIME
- Change DATABASE from FLUSH_AT_TXN_BOUNDARY to FLUSH_ANYTIME
- Modify pgstat_relation_flush_cb() to handle the anytime_only parameter: when
true, only the non-transactional stats are flushed and the flushed fields are
cleared from the pending stats to prevent double-counting at transaction
boundary; when false, all the stats are flushed.

DATABASE stats inherit the anytime flush behavior so that relation-derived
stats (tuples_returned, tuples_fetched, blocks_fetched, blocks_hit) are
visible while transactions are in progress.

Tests are added to verify the anytime flush behavior for mixed fields.
---
 doc/src/sgml/monitoring.sgml                 |  37 ++++++-
 src/backend/utils/activity/pgstat.c          |  17 ++--
 src/backend/utils/activity/pgstat_relation.c |  86 ++++++++++++----
 src/include/pgstat.h                         |  27 ++++-
 src/test/isolation/expected/stats.out        | 102 +++++++++++++++++++
 src/test/isolation/expected/stats_1.out      | 102 +++++++++++++++++++
 src/test/isolation/specs/stats.spec          |  27 ++++-
 7 files changed, 368 insertions(+), 30 deletions(-)
  13.0% doc/src/sgml/
  25.9% src/backend/utils/activity/
   6.0% src/include/
  49.8% src/test/isolation/expected/
   5.1% src/test/isolation/specs/

diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index b77d189a500..aa7bd2e2e2a 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -3767,6 +3767,19 @@ description | Waiting for a newly initialized WAL file to reach durable storage
    </tgroup>
   </table>
 
+  <note>
+   <para>
+    Some statistics are updated while a transaction is in progress (for example,
+    <structfield>blks_read</structfield>, <structfield>blks_hit</structfield>,
+    <structfield>tup_returned</structfield> and <structfield>tup_fetched</structfield>).
+    Other statistics are updated only when the transaction ends; these include
+    statistics that require transactional consistency, such as
+    <structfield>xact_commit</structfield>, <structfield>xact_rollback</structfield>,
+    <structfield>tup_inserted</structfield>, <structfield>tup_updated</structfield>
+    and <structfield>tup_deleted</structfield>.
+   </para>
+  </note>
+
  </sect2>
 
  <sect2 id="monitoring-pg-stat-database-conflicts-view">
@@ -3956,8 +3969,8 @@ description | Waiting for a newly initialized WAL file to reach durable storage
        <structfield>last_seq_scan</structfield> <type>timestamp with time zone</type>
       </para>
       <para>
-       The time of the last sequential scan on this table, based on the
-       most recent transaction stop time
+       The approximate time of the last sequential scan on this table, updated
+       at least every <varname>stats_flush_interval</varname>
       </para></entry>
      </row>
 
@@ -3984,8 +3997,8 @@ description | Waiting for a newly initialized WAL file to reach durable storage
        <structfield>last_idx_scan</structfield> <type>timestamp with time zone</type>
       </para>
       <para>
-       The time of the last index scan on this table, based on the
-       most recent transaction stop time
+       The approximate time of the last index scan on this table, updated
+       at least every <varname>stats_flush_interval</varname>
       </para></entry>
      </row>
 
@@ -4223,6 +4236,15 @@ description | Waiting for a newly initialized WAL file to reach durable storage
    </tgroup>
   </table>
 
+  <note>
+   <para>
+    The <structfield>seq_scan</structfield>, <structfield>last_seq_scan</structfield>,
+    <structfield>seq_tup_read</structfield>, <structfield>idx_scan</structfield>,
+    <structfield>last_idx_scan</structfield> and <structfield>idx_tup_fetch</structfield>
+    columns are updated while transactions are in progress.
+   </para>
+  </note>
+
  </sect2>
 
  <sect2 id="monitoring-pg-stat-all-indexes-view">
@@ -4404,6 +4426,13 @@ description | Waiting for a newly initialized WAL file to reach durable storage
     tuples (see <xref linkend="indexes-multicolumn"/>).
    </para>
   </note>
+  <note>
+   <para>
+    The <structfield>idx_scan</structfield>, <structfield>last_idx_scan</structfield>,
+    <structfield>idx_tup_read</structfield> and <structfield>idx_tup_fetch</structfield>
+    columns are updated while transactions are in progress.
+   </para>
+  </note>
   <tip>
    <para>
     <command>EXPLAIN ANALYZE</command> outputs the total number of index
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 79eb59b5625..9234185de64 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -291,7 +291,7 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
 
 		.fixed_amount = false,
 		.write_to_file = true,
-		.flush_mode = FLUSH_AT_TXN_BOUNDARY,
+		.flush_mode = FLUSH_ANYTIME,
 		/* so pg_stat_database entries can be seen in all databases */
 		.accessed_across_databases = true,
 
@@ -309,7 +309,7 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
 
 		.fixed_amount = false,
 		.write_to_file = true,
-		.flush_mode = FLUSH_AT_TXN_BOUNDARY,
+		.flush_mode = FLUSH_ANYTIME,
 
 		.shared_size = sizeof(PgStatShared_Relation),
 		.shared_data_off = offsetof(PgStatShared_Relation, stats),
@@ -1344,10 +1344,12 @@ pgstat_delete_pending_entry(PgStat_EntryRef *entry_ref)
 	dlist_delete(&entry_ref->pending_node);
 }
 
+
 /*
  * Flush out pending variable-numbered stats.
  *
- * If anytime_only is true, only flushes FLUSH_ANYTIME entries.
+ * If anytime_only is true, only flushes FLUSH_ANYTIME entries. For entries
+ * that support it, the callback may flush only non-transactional fields.
  * This is safe to call inside transactions.
  *
  * If anytime_only is false, flushes all entries.
@@ -1378,6 +1380,7 @@ pgstat_flush_pending_entries(bool nowait, bool anytime_only)
 		PgStat_Kind kind = key.kind;
 		const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
 		bool		did_flush;
+		bool		is_partial_flush = false;
 		dlist_node *next;
 
 		Assert(!kind_info->fixed_amount);
@@ -1397,19 +1400,21 @@ pgstat_flush_pending_entries(bool nowait, bool anytime_only)
 			continue;
 		}
 
-		/* flush the stats, if possible */
 		did_flush = kind_info->flush_pending_cb(entry_ref, nowait, anytime_only);
 
 		Assert(did_flush || nowait);
 
+		/* Partial flush only happens in anytime mode for FLUSH_ANYTIME stats */
+		is_partial_flush = (anytime_only && kind_info->flush_mode == FLUSH_ANYTIME);
+
 		/* determine next entry, before deleting the pending entry */
 		if (dlist_has_next(&pgStatPending, cur))
 			next = dlist_next_node(&pgStatPending, cur);
 		else
 			next = NULL;
 
-		/* if successfully flushed, remove entry */
-		if (did_flush)
+		/* if successfully and fully flushed, remove entry */
+		if (did_flush && !is_partial_flush)
 			pgstat_delete_pending_entry(entry_ref);
 		else
 			have_pending = true;
diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c
index ae2952cae89..10ab9e4cc76 100644
--- a/src/backend/utils/activity/pgstat_relation.c
+++ b/src/backend/utils/activity/pgstat_relation.c
@@ -47,7 +47,19 @@ static void add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_lev
 static void ensure_tabstat_xact_level(PgStat_TableStatus *pgstat_info);
 static void save_truncdrop_counters(PgStat_TableXactStatus *trans, bool is_drop);
 static void restore_truncdrop_counters(PgStat_TableXactStatus *trans);
+static void flush_relation_anytime_stats(PgStat_StatTabEntry *tabentry,
+										 PgStat_TableCounts *counts, bool anytime_only);
 
+/*
+ * Update database statistics with non-transactional stats.
+ */
+#define UPDATE_DATABASE_ANYTIME_STATS(dbentry, counts)				\
+	do {															\
+		(dbentry)->tuples_returned += (counts)->tuples_returned;	\
+		(dbentry)->tuples_fetched += (counts)->tuples_fetched;		\
+		(dbentry)->blocks_fetched += (counts)->blocks_fetched;		\
+		(dbentry)->blocks_hit += (counts)->blocks_hit;				\
+	} while (0)
 
 /*
  * Copy stats between relations. This is used for things like REINDEX
@@ -789,6 +801,29 @@ pgstat_twophase_postabort(FullTransactionId fxid, uint16 info,
 		rec->tuples_inserted + rec->tuples_updated;
 }
 
+/*
+ * Helper function to flush non-transactional statistics.
+ */
+static void
+flush_relation_anytime_stats(PgStat_StatTabEntry *tabentry, PgStat_TableCounts *counts,
+							 bool anytime_only)
+{
+	TimestampTz t;
+
+	tabentry->numscans += counts->numscans;
+	if (counts->numscans)
+	{
+		t = anytime_only ? GetCurrentTimestamp() : GetCurrentTransactionStopTimestamp();
+		if (t > tabentry->lastscan)
+			tabentry->lastscan = t;
+	}
+
+	tabentry->tuples_returned += counts->tuples_returned;
+	tabentry->tuples_fetched += counts->tuples_fetched;
+	tabentry->blocks_fetched += counts->blocks_fetched;
+	tabentry->blocks_hit += counts->blocks_hit;
+}
+
 /*
  * Flush out pending stats for the entry
  *
@@ -797,6 +832,13 @@ pgstat_twophase_postabort(FullTransactionId fxid, uint16 info,
  *
  * Some of the stats are copied to the corresponding pending database stats
  * entry when successfully flushing.
+ *
+ * If anytime_only is true, only non-transactional fields are flushed
+ * (numscans, tuples_returned, tuples_fetched, blocks_fetched, blocks_hit).
+ * Transactional fields remain pending until transaction boundary.
+ *
+ * When anytime_only is true, the flushed fields are also reset in the
+ * pending entry, to avoid double-counting at transaction boundary.
  */
 bool
 pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only)
@@ -807,8 +849,6 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o
 	PgStat_StatTabEntry *tabentry;	/* table entry of shared stats */
 	PgStat_StatDBEntry *dbentry;	/* pending database entry */
 
-	Assert(!anytime_only);
-
 	dboid = entry_ref->shared_entry->key.dboid;
 	lstats = (PgStat_TableStatus *) entry_ref->pending;
 	shtabstats = (PgStatShared_Relation *) entry_ref->shared_stats;
@@ -824,19 +864,36 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o
 	if (!pgstat_lock_entry(entry_ref, nowait))
 		return false;
 
-	/* add the values to the shared entry. */
 	tabentry = &shtabstats->stats;
 
-	tabentry->numscans += lstats->counts.numscans;
-	if (lstats->counts.numscans)
+	if (anytime_only)
 	{
-		TimestampTz t = GetCurrentTransactionStopTimestamp();
 
-		if (t > tabentry->lastscan)
-			tabentry->lastscan = t;
+		/* Flush non-transactional statistics */
+		flush_relation_anytime_stats(tabentry, &lstats->counts, true);
+
+		pgstat_unlock_entry(entry_ref);
+
+		/* Also update the corresponding fields in database stats */
+		dbentry = pgstat_prep_database_pending(dboid);
+		UPDATE_DATABASE_ANYTIME_STATS(dbentry, &lstats->counts);
+
+		/*
+		 * Clear the flushed fields from pending stats to prevent
+		 * double-counting when we flush all fields at transaction boundary.
+		 */
+		lstats->counts.numscans = 0;
+		lstats->counts.tuples_returned = 0;
+		lstats->counts.tuples_fetched = 0;
+		lstats->counts.blocks_fetched = 0;
+		lstats->counts.blocks_hit = 0;
+
+		return true;
 	}
-	tabentry->tuples_returned += lstats->counts.tuples_returned;
-	tabentry->tuples_fetched += lstats->counts.tuples_fetched;
+
+	/* Flush non-transactional statistics */
+	flush_relation_anytime_stats(tabentry, &lstats->counts, false);
+
 	tabentry->tuples_inserted += lstats->counts.tuples_inserted;
 	tabentry->tuples_updated += lstats->counts.tuples_updated;
 	tabentry->tuples_deleted += lstats->counts.tuples_deleted;
@@ -866,9 +923,6 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o
 	 */
 	tabentry->ins_since_vacuum += lstats->counts.tuples_inserted;
 
-	tabentry->blocks_fetched += lstats->counts.blocks_fetched;
-	tabentry->blocks_hit += lstats->counts.blocks_hit;
-
 	/* Clamp live_tuples in case of negative delta_live_tuples */
 	tabentry->live_tuples = Max(tabentry->live_tuples, 0);
 	/* Likewise for dead_tuples */
@@ -878,13 +932,11 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o
 
 	/* The entry was successfully flushed, add the same to database stats */
 	dbentry = pgstat_prep_database_pending(dboid);
-	dbentry->tuples_returned += lstats->counts.tuples_returned;
-	dbentry->tuples_fetched += lstats->counts.tuples_fetched;
+	UPDATE_DATABASE_ANYTIME_STATS(dbentry, &lstats->counts);
+
 	dbentry->tuples_inserted += lstats->counts.tuples_inserted;
 	dbentry->tuples_updated += lstats->counts.tuples_updated;
 	dbentry->tuples_deleted += lstats->counts.tuples_deleted;
-	dbentry->blocks_fetched += lstats->counts.blocks_fetched;
-	dbentry->blocks_hit += lstats->counts.blocks_hit;
 
 	return true;
 }
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index ef856dbf55b..06639198f28 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -21,6 +21,7 @@
 #include "utils/backend_status.h"	/* for backward compatibility */	/* IWYU pragma: export */
 #include "utils/pgstat_kind.h"
 #include "utils/relcache.h"
+#include "utils/timeout.h"
 #include "utils/wait_event.h"	/* for backward compatibility */	/* IWYU pragma: export */
 
 
@@ -537,10 +538,11 @@ extern void pgstat_report_anytime_stat(bool force);
 extern void pgstat_force_next_flush(void);
 
 /*
- * Schedule the next anytime stats update timeout.
+ * Schedule the next anytime stats update timeout and mark that we have
+ * anytime stats pending.
  *
  * This should be called whenever accumulating statistics that support
- * FLUSH_ANYTIME flushing mode.
+ * the FLUSH_ANYTIME flushing mode.
  */
 #define pgstat_schedule_anytime_update()												\
 	do {																				\
@@ -703,37 +705,58 @@ extern void pgstat_report_analyze(Relation rel,
 #define pgstat_count_heap_scan(rel)									\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.numscans++;					\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_heap_getnext(rel)								\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.tuples_returned++;			\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_heap_fetch(rel)								\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.tuples_fetched++;			\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_index_scan(rel)								\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.numscans++;					\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_index_tuples(rel, n)							\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.tuples_returned += (n);		\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_buffer_read(rel)								\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.blocks_fetched++;			\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 #define pgstat_count_buffer_hit(rel)								\
 	do {															\
 		if (pgstat_should_count_relation(rel))						\
+		{															\
 			(rel)->pgstat_info->counts.blocks_hit++;				\
+			pgstat_schedule_anytime_update();						\
+		}															\
 	} while (0)
 
 extern void pgstat_count_heap_insert(Relation rel, PgStat_Counter n);
diff --git a/src/test/isolation/expected/stats.out b/src/test/isolation/expected/stats.out
index cfad309ccf3..11e3e57806d 100644
--- a/src/test/isolation/expected/stats.out
+++ b/src/test/isolation/expected/stats.out
@@ -2245,6 +2245,108 @@ seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum
 (1 row)
 
 
+starting permutation: s2_begin s2_table_select s1_sleep s1_table_stats s2_track_counts_off s2_table_select s1_sleep s1_table_stats s2_track_counts_on s2_table_select s1_sleep s1_table_stats s2_table_drop s2_commit
+pg_stat_force_next_flush
+------------------------
+                        
+(1 row)
+
+step s2_begin: BEGIN;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       1|           1|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_track_counts_off: SET track_counts = off;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       1|           1|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_track_counts_on: SET track_counts = on;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       2|           2|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_table_drop: DROP TABLE test_stat_tab;
+step s2_commit: COMMIT;
+
 starting permutation: s1_track_counts_off s1_table_stats s1_track_counts_on
 pg_stat_force_next_flush
 ------------------------
diff --git a/src/test/isolation/expected/stats_1.out b/src/test/isolation/expected/stats_1.out
index e1d937784cb..aef582e7582 100644
--- a/src/test/isolation/expected/stats_1.out
+++ b/src/test/isolation/expected/stats_1.out
@@ -2253,6 +2253,108 @@ seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum
 (1 row)
 
 
+starting permutation: s2_begin s2_table_select s1_sleep s1_table_stats s2_track_counts_off s2_table_select s1_sleep s1_table_stats s2_track_counts_on s2_table_select s1_sleep s1_table_stats s2_table_drop s2_commit
+pg_stat_force_next_flush
+------------------------
+                        
+(1 row)
+
+step s2_begin: BEGIN;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       1|           1|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_track_counts_off: SET track_counts = off;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       1|           1|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_track_counts_on: SET track_counts = on;
+step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value;
+key|value
+---+-----
+k0 |    1
+(1 row)
+
+step s1_sleep: SELECT pg_sleep(1.5);
+pg_sleep
+--------
+        
+(1 row)
+
+step s1_table_stats: 
+    SELECT
+        pg_stat_get_numscans(tso.oid) AS seq_scan,
+        pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+        pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+        pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+        pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+        pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+        pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+        pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+    FROM test_stat_oid AS tso
+    WHERE tso.name = 'test_stat_tab'
+
+seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count
+--------+------------+---------+---------+---------+----------+----------+------------
+       2|           2|        1|        0|        0|         1|         0|           0
+(1 row)
+
+step s2_table_drop: DROP TABLE test_stat_tab;
+step s2_commit: COMMIT;
+
 starting permutation: s1_track_counts_off s1_table_stats s1_track_counts_on
 pg_stat_force_next_flush
 ------------------------
diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec
index da16710da0f..47414eb6009 100644
--- a/src/test/isolation/specs/stats.spec
+++ b/src/test/isolation/specs/stats.spec
@@ -50,6 +50,8 @@ step s1_rollback { ROLLBACK; }
 step s1_prepare_a { PREPARE TRANSACTION 'a'; }
 step s1_commit_prepared_a { COMMIT PREPARED 'a'; }
 step s1_rollback_prepared_a { ROLLBACK PREPARED 'a'; }
+# Has to be greater than session 2 stats_flush_interval
+step s1_sleep { SELECT pg_sleep(1.5); }
 
 # Function stats steps
 step s1_ff { SELECT pg_stat_force_next_flush(); }
@@ -132,12 +134,16 @@ step s1_slru_check_stats {
 
 
 session s2
-setup { SET stats_fetch_consistency = 'none'; }
+setup {
+        SET stats_fetch_consistency = 'none';
+        SET stats_flush_interval = '1s';
+}
 step s2_begin { BEGIN; }
 step s2_commit { COMMIT; }
 step s2_commit_prepared_a { COMMIT PREPARED 'a'; }
 step s2_rollback_prepared_a { ROLLBACK PREPARED 'a'; }
 step s2_ff { SELECT pg_stat_force_next_flush(); }
+step s2_table_drop { DROP TABLE test_stat_tab; }
 
 # Function stats steps
 step s2_track_funcs_all { SET track_functions = 'all'; }
@@ -156,6 +162,8 @@ step s2_func_stats {
 }
 
 # Relation stats steps
+step s2_track_counts_on { SET track_counts = on; }
+step s2_track_counts_off { SET track_counts = off; }
 step s2_table_select { SELECT * FROM test_stat_tab ORDER BY key, value; }
 step s2_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';}
 
@@ -435,6 +443,23 @@ permutation
   s1_table_drop
   s1_table_stats
 
+### Check that some stats are updated (seq_scan and seq_tup_read)
+### while the transaction is still running
+permutation
+  s2_begin
+  s2_table_select
+  s1_sleep
+  s1_table_stats
+  s2_track_counts_off
+  s2_table_select
+  s1_sleep
+  s1_table_stats
+  s2_track_counts_on
+  s2_table_select
+  s1_sleep
+  s1_table_stats
+  s2_table_drop
+  s2_commit
 
 ### Check that we don't count changes with track counts off, but allow access
 ### to prior stats
-- 
2.34.1

