From 6db9eb6f11629e3610eaaa2901f847f3238bfb3f Mon Sep 17 00:00:00 2001
From: Jakub Wartak <jakub.wartak@enterprisedb.com>
Date: Fri, 8 May 2026 09:19:49 +0200
Subject: [PATCH v10f 3/3] Lower pg_stat_io_histogram shared memory use by
 using array with indirect offsets.

We use the pgstat_track_io_*() family of functions to derive the length of a
static array that is allocated in the shared memory region during startup. As
the number of valid combinations of backend types vs I/O
object/context/operations comes from the semi-runtime
pgstat_io_get_sum_tracked() function, it cannot be preprocessed, so we would
need to come up with a #define PGSTAT_IO_HIST_BUCKET_SLOTS somehow. To do
that - and work around C limitations (lack of constexpr) - we could
precalculate (in the build system) the size of the static array and generate a
.h include that would be included by pgstat.h; however, it appears that would
hardly be cross-portable or cross-compilable. Instead of doing that, we
dynamically allocate shared memory for IO histograms during startup.
---
 src/backend/utils/activity/pgstat.c       |  42 +++++-
 src/backend/utils/activity/pgstat_io.c    | 165 +++++++++++++++++++---
 src/backend/utils/activity/pgstat_shmem.c |  15 ++
 src/backend/utils/adt/pgstatfuncs.c       |  20 ++-
 src/include/pgstat.h                      |  26 +++-
 5 files changed, 241 insertions(+), 27 deletions(-)

diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 7c597932671..0bd59992f4e 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -443,7 +443,13 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
 		.snapshot_ctl_off = offsetof(PgStat_Snapshot, io),
 		.shared_ctl_off = offsetof(PgStat_ShmemControl, io),
 		.shared_data_off = offsetof(PgStatShared_IO, stats),
-		.shared_data_len = sizeof(((PgStatShared_IO *) 0)->stats),
+
+		/*
+		 * Do not write everything using this .shared_data_len, as the IO
+		 * histogram backing store is handled by special-case (as it is
+		 * dynamic) in pgstat_write_statsfile() / pgstat_read_statsfile().
+		 */
+		.shared_data_len = offsetof(PgStat_IO, hist_time_buckets_slot_count),
 
 		.init_backend_cb = pgstat_io_init_backend_cb,
 		.flush_static_cb = pgstat_io_flush_cb,
@@ -1685,6 +1691,21 @@ pgstat_write_statsfile(void)
 		fputc(PGSTAT_FILE_ENTRY_FIXED, fpout);
 		pgstat_write_chunk_s(fpout, &kind);
 		pgstat_write_chunk(fpout, ptr, info->shared_data_len);
+
+		/*
+		 * PGSTAT_KIND_IO has a dynamically-sized histogram that lives outside
+		 * the shared_data_len region. This assumes that PGSTAT_FILE_FORMAT_ID
+		 * would be bumped each time that pgstat_track_io*() logic is altered.
+		 */
+		if (kind == PGSTAT_KIND_IO)
+		{
+			PgStat_IO  *io = pgStatLocal.snapshot.io;
+
+			pgstat_write_chunk(fpout, io->hist_time_buckets_slots,
+							   (size_t) io->hist_time_buckets_slot_count *
+							   PGSTAT_IO_HIST_BUCKETS *
+							   sizeof(uint64));
+		}
 	}
 
 	/*
@@ -1930,6 +1951,25 @@ pgstat_read_statsfile(void)
 						goto error;
 					}
 
+					/*
+					 * PGSTAT_KIND_IO also has a semi-dynamic histogram
+					 * array appended after the main chunk. By now,
+					 * StatsShmemInit() has prepared the memory.
+					 */
+					if (kind == PGSTAT_KIND_IO)
+					{
+						PgStat_IO  *io = &shmem->io.stats;
+
+						if (!pgstat_read_chunk(fpin, io->hist_time_buckets_slots,
+											   (size_t) io->hist_time_buckets_slot_count *
+											   PGSTAT_IO_HIST_BUCKETS *
+											   sizeof(uint64)))
+						{
+							elog(WARNING, "could not read pgstat_io histogram backing store");
+							goto error;
+						}
+					}
+
 					break;
 				}
 			case PGSTAT_FILE_ENTRY_HASH:
diff --git a/src/backend/utils/activity/pgstat_io.c b/src/backend/utils/activity/pgstat_io.c
index 1696f278a77..3cc965638e8 100644
--- a/src/backend/utils/activity/pgstat_io.c
+++ b/src/backend/utils/activity/pgstat_io.c
@@ -20,6 +20,7 @@
 #include "executor/instrument.h"
 #include "port/pg_bitutils.h"
 #include "storage/bufmgr.h"
+#include "storage/shmem.h"
 #include "utils/memutils.h"
 #include "utils/pgstat_internal.h"
 
@@ -210,6 +211,8 @@ pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op,
 		{
 			int			offset;
 
+			Assert(track_io_timing || track_wal_io_timing);
+
 			/*
 			 * calculate the bucket_index based on latency in nanoseconds
 			 * (uint64)
@@ -217,6 +220,10 @@ pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op,
 			bucket_index = get_bucket_index(INSTR_TIME_GET_NANOSEC(io_time));
 
 			offset = PendingIOStats.pending_hist_time_buckets_offsets[io_object][io_context][io_op];
+
+			/* does the offset point to a valid slot? */
+			Assert(offset >= 0 && offset < PendingIOStats.pending_hist_time_buckets_size);
+
 			PendingIOStats.pending_hist_time_buckets[offset][bucket_index]++;
 		}
 
@@ -258,6 +265,7 @@ pgstat_io_flush_cb(bool nowait)
 {
 	LWLock	   *bktype_lock;
 	PgStat_BktypeIO *bktype_shstats;
+	PgStat_IO  *bk_io;
 
 	if (!have_iostats)
 		return false;
@@ -265,6 +273,7 @@ pgstat_io_flush_cb(bool nowait)
 	bktype_lock = &pgStatLocal.shmem->io.locks[MyBackendType];
 	bktype_shstats =
 		&pgStatLocal.shmem->io.stats.stats[MyBackendType];
+	bk_io = &pgStatLocal.shmem->io.stats;
 
 	if (!nowait)
 		LWLockAcquire(bktype_lock, LW_EXCLUSIVE);
@@ -297,16 +306,23 @@ pgstat_io_flush_cb(bool nowait)
 				 * offsets to save memory) into shared memory.
 				 */
 				if (PendingIOStats.pending_hist_time_buckets != NULL)
-					for (int b = 0; b < PGSTAT_IO_HIST_BUCKETS; b++)
-					{
-						int			pending_off = PendingIOStats.pending_hist_time_buckets_offsets[io_object][io_context][io_op];
+				{
+					int			bktype_shstats_off = bktype_shstats->hist_time_buckets_offsets[io_object][io_context][io_op];
+					int			pending_off = PendingIOStats.pending_hist_time_buckets_offsets[io_object][io_context][io_op];
 
-						if (pending_off != -1)
-						{
-							bktype_shstats->hist_time_buckets[io_object][io_context][io_op][b] +=
-								PendingIOStats.pending_hist_time_buckets[pending_off][b];
-						}
-					}
+					Assert(track_io_timing || track_wal_io_timing);
+
+					/*
+					 * A value of -1 here means that such a mapping doesn't
+					 * have a slot (based on pgstat_track_io_*()).
+					 */
+					if (bktype_shstats_off == -1 || pending_off == -1)
+						continue;
+
+					for (int b = 0; b < PGSTAT_IO_HIST_BUCKETS; b++)
+						bk_io->hist_time_buckets_slots[bktype_shstats_off][b] +=
+							PendingIOStats.pending_hist_time_buckets[pending_off][b];
+				}
 			}
 		}
 	}
@@ -415,7 +431,7 @@ pgstat_io_init_backend_cb(void)
 				{
 					if (pgstat_tracks_io_op(MyBackendType, io_object, io_context, io_op))
 					{
-						Assert(io_histograms_used <= PendingIOStats.pending_hist_time_buckets_size);
+						Assert(io_histograms_used < PendingIOStats.pending_hist_time_buckets_size);
 
 						PendingIOStats.pending_hist_time_buckets_offsets[io_object][io_context][io_op] =
 							io_histograms_used++;
@@ -428,12 +444,12 @@ pgstat_io_init_backend_cb(void)
 	}
 	else
 		PendingIOStats.pending_hist_time_buckets = NULL;
-
 }
 
 void
 pgstat_io_init_shmem_cb(void *stats)
 {
+	int			histogram_slots = 0;
 	PgStatShared_IO *stat_shmem = (PgStatShared_IO *) stats;
 
 	for (int i = 0; i < BACKEND_NUM_TYPES; i++)
@@ -441,26 +457,79 @@ pgstat_io_init_shmem_cb(void *stats)
 
 	/* this might end up being lazily allocated in pgstat_io_snapshot_cb() */
 	pgStatLocal.snapshot.io = NULL;
+
+	/*
+	 * Establish indirect mapping from
+	 * PgStat_BktypeIO.hist_time_buckets_offsets[][][] to
+	 * PgStat_IO.hist_time_buckets_slots[x]
+	 */
+	for (int i = 0; i < BACKEND_NUM_TYPES; i++)
+	{
+		for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
+		{
+			for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
+			{
+				for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
+				{
+					if (pgstat_tracks_io_op(i, io_object, io_context, io_op))
+					{
+						stat_shmem->stats.stats[i].hist_time_buckets_offsets[io_object][io_context][io_op] =
+							histogram_slots++;
+					}
+					else
+						stat_shmem->stats.stats[i].hist_time_buckets_offsets[io_object][io_context][io_op] =
+							-1;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Sanity check: the offset table we just produced must use exactly the
+	 * number of slots StatsShmemInit() reserved.  Both come from the same
+	 * pgstat_tracks_io_*() rules, so a mismatch would indicate a bug.
+	 */
+	Assert(histogram_slots == stat_shmem->stats.hist_time_buckets_slot_count);
 }
 
 void
 pgstat_io_reset_all_cb(TimestampTz ts)
 {
+	PgStat_IO  *io_stats = &pgStatLocal.shmem->io.stats;
+
 	for (int i = 0; i < BACKEND_NUM_TYPES; i++)
 	{
 		LWLock	   *bktype_lock = &pgStatLocal.shmem->io.locks[i];
-		PgStat_BktypeIO *bktype_shstats = &pgStatLocal.shmem->io.stats.stats[i];
+		PgStat_BktypeIO *bktype_shstats = &io_stats->stats[i];
 
 		LWLockAcquire(bktype_lock, LW_EXCLUSIVE);
 
 		/*
 		 * Use the lock in the first BackendType's PgStat_BktypeIO to protect
-		 * the reset timestamp as well.
+		 * the reset timestamp.
 		 */
 		if (i == 0)
-			pgStatLocal.shmem->io.stats.stat_reset_timestamp = ts;
+			io_stats->stat_reset_timestamp = ts;
+
+		/* Reset this BackendType's histogram slots */
+		for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
+		{
+			for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
+			{
+				for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
+				{
+					int			off = bktype_shstats->hist_time_buckets_offsets[io_object][io_context][io_op];
+
+					if (off == -1)
+						continue;
+					memset(io_stats->hist_time_buckets_slots[off], 0,
+						   sizeof(io_stats->hist_time_buckets_slots[off]));
+				}
+			}
+		}
 
-		memset(bktype_shstats, 0, sizeof(*bktype_shstats));
+		/* Avoid resetting our indirect mapping offsets */
+		memset(bktype_shstats, 0, offsetof(PgStat_BktypeIO, hist_time_buckets_offsets));
 		LWLockRelease(bktype_lock);
 	}
 }
@@ -468,14 +537,30 @@ pgstat_io_reset_all_cb(TimestampTz ts)
 void
 pgstat_io_snapshot_cb(void)
 {
+	PgStat_IO  *shmem_io = &pgStatLocal.shmem->io.stats;
+
 	if (unlikely(pgStatLocal.snapshot.io == NULL))
+	{
+		int			n = shmem_io->hist_time_buckets_slot_count;
+
 		pgStatLocal.snapshot.io = MemoryContextAllocZero(TopMemoryContext,
 														 sizeof(PgStat_IO));
 
+		/*
+		 * Allocated on demand in private (TopMemoryContext) memory and points
+		 * to the same indirect offsets.
+		 */
+		pgStatLocal.snapshot.io->hist_time_buckets_slot_count = n;
+		pgStatLocal.snapshot.io->hist_time_buckets_slots =
+			MemoryContextAllocZero(TopMemoryContext,
+								   (size_t) n * PGSTAT_IO_HIST_BUCKETS *
+								   sizeof(uint64));
+	}
+
 	for (int i = 0; i < BACKEND_NUM_TYPES; i++)
 	{
 		LWLock	   *bktype_lock = &pgStatLocal.shmem->io.locks[i];
-		PgStat_BktypeIO *bktype_shstats = &pgStatLocal.shmem->io.stats.stats[i];
+		PgStat_BktypeIO *bktype_shstats = &shmem_io->stats[i];
 		PgStat_BktypeIO *bktype_snap = &pgStatLocal.snapshot.io->stats[i];
 
 		LWLockAcquire(bktype_lock, LW_SHARED);
@@ -486,17 +571,35 @@ pgstat_io_snapshot_cb(void)
 		 */
 		if (i == 0)
 			pgStatLocal.snapshot.io->stat_reset_timestamp =
-				pgStatLocal.shmem->io.stats.stat_reset_timestamp;
+				shmem_io->stat_reset_timestamp;
 
 		/* using struct assignment due to better type safety */
 		*bktype_snap = *bktype_shstats;
+
+		/* Copy this BackendType's histogram slots */
+		for (int io_object = 0; io_object < IOOBJECT_NUM_TYPES; io_object++)
+		{
+			for (int io_context = 0; io_context < IOCONTEXT_NUM_TYPES; io_context++)
+			{
+				for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
+				{
+					int			off = bktype_shstats->hist_time_buckets_offsets[io_object][io_context][io_op];
+
+					if (off == -1)
+						continue;
+					memcpy(pgStatLocal.snapshot.io->hist_time_buckets_slots[off],
+						   shmem_io->hist_time_buckets_slots[off],
+						   sizeof(shmem_io->hist_time_buckets_slots[off]));
+				}
+			}
+		}
+
 		LWLockRelease(bktype_lock);
 	}
 }
 
 /*
 * IO statistics are not collected for all BackendTypes.
-*
 * The following BackendTypes do not participate in the cumulative stats
 * subsystem or do not perform IO on which we currently track:
 * - Dead-end backend because it is not connected to shared memory and
@@ -720,3 +823,29 @@ pgstat_tracks_io_op(BackendType bktype, IOObject io_object,
 
 	return true;
 }
+
+/*
+ * Total number of usable (BackendType, IOObject, IOContext, IOOp)
+ * combinations that we consider trackable.
+ */
+int
+pgstat_io_get_sum_tracked(void)
+{
+	int			sum = 0;
+
+	for (int i = 0; i < BACKEND_NUM_TYPES; i++)
+		sum += pgstat_bktype_count_potentially_used(i);
+
+	return sum;
+}
+
+/*
+ * Returns number of bytes for shared memory required by
+ * PgStat_IO.hist_time_buckets_slots.
+ */
+Size
+pgstat_io_histogram_shmem_size(void)
+{
+	return mul_size(pgstat_io_get_sum_tracked(),
+					PGSTAT_IO_HIST_BUCKETS * sizeof(uint64));
+}
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index b8f354c818a..bb25be106be 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -139,6 +139,12 @@ StatsShmemSize(void)
 	sz = MAXALIGN(sizeof(PgStat_ShmemControl));
 	sz = add_size(sz, pgstat_dsa_init_size());
 
+	/*
+	 * Dynamic allocation for PgStat_IO.hist_time_buckets_slots. Sized from
+	 * the rules in pgstat_tracks_io_*().
+	 */
+	sz = add_size(sz, MAXALIGN(pgstat_io_histogram_shmem_size()));
+
 	/* Add shared memory for all the custom fixed-numbered statistics */
 	for (PgStat_Kind kind = PGSTAT_KIND_CUSTOM_MIN; kind <= PGSTAT_KIND_CUSTOM_MAX; kind++)
 	{
@@ -194,6 +200,15 @@ StatsShmemInit(void *arg)
 							  LWTRANCHE_PGSTATS_DSA, NULL);
 	dsa_pin(dsa);
 
+	/*
+	 * Prepare PgStat_IO.hist_time_buckets_slot* stuff before calling
+	 * pgstat_io_init_shmem_cb(). The additional memory for this was requested
+	 * in the StatsShmemSize() above.
+	 */
+	ctl->io.stats.hist_time_buckets_slot_count = pgstat_io_get_sum_tracked();
+	ctl->io.stats.hist_time_buckets_slots = (uint64 (*)[PGSTAT_IO_HIST_BUCKETS]) p;
+	p += MAXALIGN(pgstat_io_histogram_shmem_size());
+
 	/*
 	 * To ensure dshash is created in "plain" shared memory, temporarily limit
 	 * size of dsa to the initial size of the dsa.
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 04eab7b3bcd..f106ca9cd3e 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1667,6 +1667,7 @@ typedef enum hist_io_stat_col
  */
 static void
 pg_stat_io_histogram_build_tuples(ReturnSetInfo *rsinfo,
+								  PgStat_IO *backends_io_stats,
 								  PgStat_BktypeIO *bktype_stats,
 								  BackendType bktype,
 								  TimestampTz stat_reset_timestamp)
@@ -1695,6 +1696,16 @@ pg_stat_io_histogram_build_tuples(ReturnSetInfo *rsinfo,
 			for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++)
 			{
 				const char *op_name = pgstat_get_io_op_name(io_op);
+				int			bktype_hist_time_bucket_off;
+
+				/*
+				 * The offset is the same for every histogram bucket of this
+				 * io_obj/io_context/io_op combination.
+				 */
+				bktype_hist_time_bucket_off = bktype_stats->hist_time_buckets_offsets[io_obj][io_context][io_op];
+				if (bktype_hist_time_bucket_off == -1)
+					continue;
+				Assert(bktype_hist_time_bucket_off < backends_io_stats->hist_time_buckets_slot_count);
 
 				for (int bucket = 0; bucket < PGSTAT_IO_HIST_BUCKETS; bucket++)
 				{
@@ -1703,6 +1714,7 @@ pg_stat_io_histogram_build_tuples(ReturnSetInfo *rsinfo,
 					RangeBound	lower,
 								upper;
 					RangeType  *range;
+					uint64		bktype_bucket;
 
 					values[HIST_IO_COL_BACKEND_TYPE] = bktype_desc;
 					values[HIST_IO_COL_OBJECT] = CStringGetTextDatum(obj_name);
@@ -1731,9 +1743,9 @@ pg_stat_io_histogram_build_tuples(ReturnSetInfo *rsinfo,
 					range = make_range(typcache, &lower, &upper, false, NULL);
 					values[HIST_IO_COL_BUCKET_US] = RangeTypePGetDatum(range);
 
-					/* bucket count */
-					values[HIST_IO_COL_COUNT] = Int64GetDatum(
-															  bktype_stats->hist_time_buckets[io_obj][io_context][io_op][bucket]);
+					/* get bucket count, access indirectly */
+					bktype_bucket = backends_io_stats->hist_time_buckets_slots[bktype_hist_time_bucket_off][bucket];
+					values[HIST_IO_COL_COUNT] = Int64GetDatum(bktype_bucket);
 
 					if (stat_reset_timestamp != 0)
 						values[HIST_IO_COL_RESET_TIME] = TimestampTzGetDatum(stat_reset_timestamp);
@@ -1779,7 +1791,7 @@ pg_stat_get_io_histogram(PG_FUNCTION_ARGS)
 			continue;
 
 		/* save tuples with data from this PgStat_BktypeIO */
-		pg_stat_io_histogram_build_tuples(rsinfo, bktype_stats, bktype,
+		pg_stat_io_histogram_build_tuples(rsinfo, backends_io_stats, bktype_stats, bktype,
 										  backends_io_stats->stat_reset_timestamp);
 	}
 
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 984914e69b8..de90f1fb5b0 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -20,7 +20,6 @@
 #include "utils/backend_status.h"	/* for backward compatibility */	/* IWYU pragma: export */
 #include "utils/pgstat_kind.h"
 
-
 /* avoid including access/transam.h */
 typedef struct FullTransactionId FullTransactionId;
 
@@ -218,7 +217,7 @@ typedef struct PgStat_TableXactStatus
  * ------------------------------------------------------------
  */
 
-#define PGSTAT_FILE_FORMAT_ID	0x01A5BCBC
+#define PGSTAT_FILE_FORMAT_ID	0x01A5BCBD
 
 typedef struct PgStat_ArchiverStats
 {
@@ -342,7 +341,14 @@ typedef struct PgStat_BktypeIO
 	uint64		bytes[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
 	PgStat_Counter counts[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
 	PgStat_Counter times[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
-	uint64		hist_time_buckets[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES][PGSTAT_IO_HIST_BUCKETS];
+
+	/*
+	 * Indirect offset to PgStat_IO (parent
+	 * structure).hist_time_buckets_slots. This needs to be the last field due
+	 * to the use of memset(.., offsetof(hist_time_buckets_offsets)) in
+	 * pgstat_io_reset_all_cb().
+	 */
+	int			hist_time_buckets_offsets[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
 } PgStat_BktypeIO;
 
 typedef struct PgStat_PendingIO
@@ -358,7 +364,7 @@ typedef struct PgStat_PendingIO
 	 * memory.
 	 */
 	uint64		(*pending_hist_time_buckets)[PGSTAT_IO_HIST_BUCKETS];
-	uint64		pending_hist_time_buckets_offsets[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
+	int			pending_hist_time_buckets_offsets[IOOBJECT_NUM_TYPES][IOCONTEXT_NUM_TYPES][IOOP_NUM_TYPES];
 
 	/*
 	 * Cache how much histograms we have allocated to avoid repetably calling
@@ -374,6 +380,16 @@ typedef struct PgStat_IO
 {
 	TimestampTz stat_reset_timestamp;
 	PgStat_BktypeIO stats[BACKEND_NUM_TYPES];
+
+	/*
+	 * The IO histogram memory is sized at postmaster start from the rules in
+	 * pgstat_tracks_io_*() and persisted by additional code that handles this
+	 * dynamic (shared) memory pointer in pgstat_write_statsfile() /
+	 * pgstat_read_statsfile(), so these are not part of the serialization to
+	 * disk by common code.
+	 */
+	int			hist_time_buckets_slot_count;
+	uint64		(*hist_time_buckets_slots)[PGSTAT_IO_HIST_BUCKETS];
 } PgStat_IO;
 
 typedef struct PgStat_LockEntry
@@ -654,6 +670,8 @@ extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void);
 extern bool pgstat_bktype_io_stats_valid(PgStat_BktypeIO *backend_io,
 										 BackendType bktype);
 extern int	pgstat_bktype_count_potentially_used(BackendType bktype);
+extern int	pgstat_io_get_sum_tracked(void);
+extern Size pgstat_io_histogram_shmem_size(void);
 extern void pgstat_count_io_op(IOObject io_object, IOContext io_context,
 							   IOOp io_op, uint32 cnt, uint64 bytes);
 extern instr_time pgstat_prepare_io_time(bool track_io_guc);
-- 
2.43.0

