From 20e1e8f82e980f31e6855820945067b7268a4962 Mon Sep 17 00:00:00 2001 From: Alena Rybakina Date: Sat, 28 Feb 2026 19:55:30 +0300 Subject: [PATCH 2/3] Add machinery for grabbing an extended vacuum statistics. Vacuum statistics are stored separately from regular relation and database statistics. Dedicated PGSTAT_KIND_VACUUM_RELATION and PGSTAT_KIND_VACUUM_DB entries in the cumulative statistics system allocate memory for vacuum-specific metrics, reducing overall memory use. Statistics are gathered separately for tables and indexes according to vacuum phases. The ExtVacReport union and type field distinguish PGSTAT_EXTVAC_TABLE vs PGSTAT_EXTVAC_INDEX. Heap vacuum stats are sent to the cumulative statistics system after vacuum has processed the indexes, according to vacuum phases. Database vacuum statistics aggregate statistics of tables and indexes in the database. Common for tables, indexes, and database: total_blks_hit, total_blks_read, total_blks_dirtied are number of hit, missed and dirtied pages in shared buffers during a vacuum operation respectively. total_blks_dirtied means 'dirtied only by this action', so if a page was dirty before the vacuum operation, it does not count as dirtied. blk_read_time and blk_write_time track only access to buffer pages and flushing them to disk. During vacuum, write time can remain zero if no flushing operations were performed. total_time is the diff between timestamps at start and finish; it includes idle time (IO and lock waits), so it is not equal to the sum of user and system time. delay_time means total vacuum sleep time in vacuum delay point. Table and index: tuples_deleted is the number of tuples cleaned up by the vacuum operation. pages_removed is the number of pages by which the physical data storage of the relation was reduced. pages_deleted is the number of freed pages in the table (file size may not have changed). 
They are processed as independent structures that do not affect each other, unlike WAL and buffers, so they cannot be summed for the database as the common statistics above. Table only: pages_frozen is the number of pages marked as frozen in VM during vacuum; it is incremented when a page is marked all-frozen. pages_all_visible is the number of pages marked as all-visible in VM. wraparound_failsafe_count is the number of times vacuum started urgent cleanup to prevent wraparound. Table and database: wraparound_failsafe is the count of urgent anti-wraparound cleanups. Database only: errors is the number of errors at error level caught during vacuum. Authors: Alena Rybakina , Andrei Lepikhov , Andrei Zubkov Reviewed-by: Dilip Kumar , Masahiko Sawada , Ilia Evdokimov , jian he , Kirill Reshke , Alexander Korotkov , Jim Nasby , Sami Imseih , Karina Litskevich --- src/backend/access/heap/vacuumlazy.c | 316 +++++++- src/backend/catalog/heap.c | 1 + src/backend/catalog/index.c | 1 + src/backend/catalog/system_views.sql | 109 +++ src/backend/commands/dbcommands.c | 1 + src/backend/commands/vacuum.c | 4 + src/backend/commands/vacuumparallel.c | 10 + src/backend/utils/activity/Makefile | 1 + src/backend/utils/activity/pgstat.c | 30 +- src/backend/utils/activity/pgstat_database.c | 9 + src/backend/utils/activity/pgstat_relation.c | 6 + src/backend/utils/activity/pgstat_vacuum.c | 217 ++++++ src/backend/utils/adt/pgstatfuncs.c | 218 ++++++ src/backend/utils/misc/guc_parameters.dat | 6 + src/backend/utils/misc/postgresql.conf.sample | 1 + src/include/catalog/pg_proc.dat | 27 + src/include/commands/vacuum.h | 26 + src/include/pgstat.h | 134 +++- src/include/utils/pgstat_internal.h | 15 + src/include/utils/pgstat_kind.h | 4 +- .../vacuum-extending-in-repetable-read.out | 53 ++ src/test/isolation/isolation_schedule | 1 + .../vacuum-extending-in-repetable-read.spec | 59 ++ .../t/052_vacuum_extending_basic_test.pl | 737 ++++++++++++++++++ .../t/053_vacuum_extending_freeze_test.pl | 
329 ++++++++ src/test/regress/expected/rules.out | 75 ++ 26 files changed, 2380 insertions(+), 10 deletions(-) create mode 100644 src/backend/utils/activity/pgstat_vacuum.c create mode 100644 src/test/isolation/expected/vacuum-extending-in-repetable-read.out create mode 100644 src/test/isolation/specs/vacuum-extending-in-repetable-read.spec create mode 100644 src/test/recovery/t/052_vacuum_extending_basic_test.pl create mode 100644 src/test/recovery/t/053_vacuum_extending_freeze_test.pl diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 4be267ff657..60db695b8f0 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -280,6 +280,8 @@ typedef struct LVRelState /* Error reporting state */ char *dbname; char *relnamespace; + Oid reloid; + Oid indoid; char *relname; char *indname; /* Current index name */ BlockNumber blkno; /* used only for heap operations */ @@ -399,6 +401,12 @@ typedef struct LVRelState * been permanently disabled. */ BlockNumber eager_scan_remaining_fails; + + int32 wraparound_failsafe_count; /* number of emergency vacuums to + * prevent anti-wraparound + * shutdown */ + + PgStat_VacuumRelationCounts extVacReportIdx; } LVRelState; @@ -410,7 +418,6 @@ typedef struct LVSavedErrInfo VacErrPhase phase; } LVSavedErrInfo; - /* non-export function prototypes */ static void lazy_scan_heap(LVRelState *vacrel); static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, @@ -484,6 +491,227 @@ static void update_vacuum_error_info(LVRelState *vacrel, static void restore_vacuum_error_info(LVRelState *vacrel, const LVSavedErrInfo *saved_vacrel); +/* ---------- + * extvac_stats_start() - + * + * Save cut-off values of extended vacuum counters before start of a relation + * processing. 
+ * ---------- + */ +static void +extvac_stats_start(Relation rel, LVExtStatCounters * counters) +{ + TimestampTz starttime; + + if (!pgstat_track_vacuum_statistics) + return; + + memset(counters, 0, sizeof(LVExtStatCounters)); + + starttime = GetCurrentTimestamp(); + + counters->starttime = starttime; + counters->walusage = pgWalUsage; + counters->bufusage = pgBufferUsage; + counters->VacuumDelayTime = VacuumDelayTime; + counters->blocks_fetched = 0; + counters->blocks_hit = 0; + + if (!rel->pgstat_info || !pgstat_track_counts) + + /* + * if something goes wrong or user doesn't want to track a database + * activity - just suppress it. + */ + return; + + counters->blocks_fetched = rel->pgstat_info->counts.blocks_fetched; + counters->blocks_hit = rel->pgstat_info->counts.blocks_hit; +} + +/* ---------- + * extvac_stats_end() - + * + * Called to finish an extended vacuum statistic gathering and form a report. + * ---------- + */ +static void +extvac_stats_end(Relation rel, LVExtStatCounters * counters, + PgStat_CommonCounts * report) +{ + WalUsage walusage; + BufferUsage bufusage; + TimestampTz endtime; + long secs; + int usecs; + + if (!pgstat_track_vacuum_statistics) + return; + + memset(report, 0, sizeof(PgStat_CommonCounts)); + + /* Calculate diffs of global stat parameters on WAL and buffer usage. */ + memset(&walusage, 0, sizeof(WalUsage)); + WalUsageAccumDiff(&walusage, &pgWalUsage, &counters->walusage); + + memset(&bufusage, 0, sizeof(BufferUsage)); + BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &counters->bufusage); + + endtime = GetCurrentTimestamp(); + TimestampDifference(counters->starttime, endtime, &secs, &usecs); + + /* + * Fill additional statistics on a vacuum processing operation. 
+ */ + report->total_blks_read += bufusage.local_blks_read + bufusage.shared_blks_read; + report->total_blks_hit += bufusage.local_blks_hit + bufusage.shared_blks_hit; + report->total_blks_dirtied += bufusage.local_blks_dirtied + bufusage.shared_blks_dirtied; + report->total_blks_written += bufusage.shared_blks_written; + + report->wal_records += walusage.wal_records; + report->wal_fpi += walusage.wal_fpi; + report->wal_bytes += walusage.wal_bytes; + + report->blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage.local_blk_read_time); + report->blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage.shared_blk_read_time); + report->blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage.local_blk_write_time); + report->blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage.shared_blk_write_time); + report->delay_time += VacuumDelayTime - counters->VacuumDelayTime; + + report->total_time += secs * 1000. + usecs / 1000.; + + if (!rel->pgstat_info || !pgstat_track_counts) + + /* + * if something goes wrong or an user doesn't want to track a database + * activity - just suppress it. + */ + return; + + report->blks_fetched += + rel->pgstat_info->counts.blocks_fetched - counters->blocks_fetched; + report->blks_hit += + rel->pgstat_info->counts.blocks_hit - counters->blocks_hit; +} + +void +extvac_stats_start_idx(Relation rel, IndexBulkDeleteResult *stats, + LVExtStatCountersIdx * counters) +{ + if (!pgstat_track_vacuum_statistics) + return; + + /* Set initial values for common heap and index statistics */ + extvac_stats_start(rel, &counters->common); + counters->pages_deleted = counters->tuples_removed = 0; + + if (stats != NULL) + { + /* + * XXX: Why do we need this code here? If it is needed, I feel lack of + * comments, describing the reason. 
+ */ + counters->tuples_removed = stats->tuples_removed; + counters->pages_deleted = stats->pages_deleted; + } +} + +void +extvac_stats_end_idx(Relation rel, IndexBulkDeleteResult *stats, + LVExtStatCountersIdx * counters, PgStat_VacuumRelationCounts * report) +{ + if (!pgstat_track_vacuum_statistics) + return; + + memset(report, 0, sizeof(PgStat_VacuumRelationCounts)); + + extvac_stats_end(rel, &counters->common, &report->common); + + report->type = PGSTAT_EXTVAC_INDEX; + + if (stats != NULL) + { + /* + * if something goes wrong or a user doesn't want to track a database + * activity - just suppress it. + */ + + /* Fill index-specific extended stats fields */ + report->common.tuples_deleted = + stats->tuples_removed - counters->tuples_removed; + report->index.pages_deleted = + stats->pages_deleted - counters->pages_deleted; + } +} + +/* Accumulate vacuum statistics for heap. + * + * Because of the complexity of vacuum processing: it switches processing between + * the heap relation and index relations and vice versa, we need to store + * gathered statistics information for heap relations several times before + * the vacuum starts processing the indexes again. + * + * It is necessary to gather correct statistics information for heap and indexes + * otherwise the index statistics information would be added to its parent heap + * statistics information and it would be difficult to analyze it later. + * + * We can't subtract union vacuum statistics information for index from the heap relations + * because of total and delay time statistics collecting during parallel vacuum + * procedure. 
+*/ +static void +accumulate_heap_vacuum_statistics(LVRelState *vacrel, PgStat_VacuumRelationCounts * extVacStats) +{ + if (!pgstat_track_vacuum_statistics) + return; + + /* Fill heap-specific extended stats fields */ + extVacStats->type = PGSTAT_EXTVAC_TABLE; + extVacStats->table.pages_scanned = vacrel->scanned_pages; + extVacStats->table.pages_removed = vacrel->removed_pages; + extVacStats->table.vm_new_frozen_pages = vacrel->vm_new_frozen_pages; + extVacStats->table.vm_new_visible_pages = vacrel->vm_new_visible_pages; + extVacStats->table.vm_new_visible_frozen_pages = vacrel->vm_new_visible_frozen_pages; + extVacStats->common.tuples_deleted = vacrel->tuples_deleted; + extVacStats->table.tuples_frozen = vacrel->tuples_frozen; + extVacStats->table.recently_dead_tuples = vacrel->recently_dead_tuples; + extVacStats->table.recently_dead_tuples = vacrel->recently_dead_tuples; + extVacStats->table.missed_dead_tuples = vacrel->missed_dead_tuples; + extVacStats->table.missed_dead_pages = vacrel->missed_dead_pages; + extVacStats->table.index_vacuum_count = vacrel->num_index_scans; + extVacStats->common.wraparound_failsafe_count = vacrel->wraparound_failsafe_count; + + extVacStats->common.blk_read_time -= vacrel->extVacReportIdx.common.blk_read_time; + extVacStats->common.blk_write_time -= vacrel->extVacReportIdx.common.blk_write_time; + extVacStats->common.total_blks_dirtied -= vacrel->extVacReportIdx.common.total_blks_dirtied; + extVacStats->common.total_blks_hit -= vacrel->extVacReportIdx.common.total_blks_hit; + extVacStats->common.total_blks_read -= vacrel->extVacReportIdx.common.total_blks_read; + extVacStats->common.total_blks_written -= vacrel->extVacReportIdx.common.total_blks_written; + extVacStats->common.wal_bytes -= vacrel->extVacReportIdx.common.wal_bytes; + extVacStats->common.wal_fpi -= vacrel->extVacReportIdx.common.wal_fpi; + extVacStats->common.wal_records -= vacrel->extVacReportIdx.common.wal_records; + + extVacStats->common.total_time -= 
vacrel->extVacReportIdx.common.total_time; + extVacStats->common.delay_time -= vacrel->extVacReportIdx.common.delay_time; + +} + +static void +accumulate_idxs_vacuum_statistics(LVRelState *vacrel, PgStat_VacuumRelationCounts * extVacIdxStats) +{ + /* Fill heap-specific extended stats fields */ + vacrel->extVacReportIdx.common.blk_read_time += extVacIdxStats->common.blk_read_time; + vacrel->extVacReportIdx.common.blk_write_time += extVacIdxStats->common.blk_write_time; + vacrel->extVacReportIdx.common.total_blks_dirtied += extVacIdxStats->common.total_blks_dirtied; + vacrel->extVacReportIdx.common.total_blks_hit += extVacIdxStats->common.total_blks_hit; + vacrel->extVacReportIdx.common.total_blks_read += extVacIdxStats->common.total_blks_read; + vacrel->extVacReportIdx.common.total_blks_written += extVacIdxStats->common.total_blks_written; + vacrel->extVacReportIdx.common.wal_bytes += extVacIdxStats->common.wal_bytes; + vacrel->extVacReportIdx.common.wal_fpi += extVacIdxStats->common.wal_fpi; + vacrel->extVacReportIdx.common.wal_records += extVacIdxStats->common.wal_records; + vacrel->extVacReportIdx.common.delay_time += extVacIdxStats->common.delay_time; + vacrel->extVacReportIdx.common.total_time += extVacIdxStats->common.total_time; +} /* @@ -635,7 +863,6 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, new_rel_allvisible, new_rel_allfrozen; PGRUsage ru0; - TimestampTz starttime = 0; PgStat_Counter startreadtime = 0, startwritetime = 0; WalUsage startwalusage = pgWalUsage; @@ -643,6 +870,11 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, ErrorContextCallback errcallback; char **indnames = NULL; Size dead_items_max_bytes = 0; + LVExtStatCounters extVacCounters; + PgStat_VacuumRelationCounts extVacReport; + + /* Initialize vacuum statistics */ + memset(&extVacReport, 0, sizeof(PgStat_VacuumRelationCounts)); verbose = (params.options & VACOPT_VERBOSE) != 0; instrument = (verbose || (AmAutoVacuumWorkerProcess() && @@ -657,8 +889,7 @@ 
heap_vacuum_rel(Relation rel, const VacuumParams params, } } - /* Used for instrumentation and stats report */ - starttime = GetCurrentTimestamp(); + extvac_stats_start(rel, &extVacCounters); pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM, RelationGetRelid(rel)); @@ -671,6 +902,8 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, pgstat_progress_update_param(PROGRESS_VACUUM_STARTED_BY, PROGRESS_VACUUM_STARTED_BY_MANUAL); + extvac_stats_start(rel, &extVacCounters); + /* * Setup error traceback support for ereport() first. The idea is to set * up an error context callback to display additional information on any @@ -687,6 +920,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, vacrel->dbname = get_database_name(MyDatabaseId); vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel)); vacrel->relname = pstrdup(RelationGetRelationName(rel)); + vacrel->reloid = RelationGetRelid(rel); vacrel->indname = NULL; vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN; vacrel->verbose = verbose; @@ -695,6 +929,9 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, errcallback.previous = error_context_stack; error_context_stack = &errcallback; + memset(&vacrel->extVacReportIdx, 0, sizeof(PgStat_VacuumRelationCounts)); + memset(&extVacReport.common, 0, sizeof(PgStat_CommonCounts)); + /* Set up high level stuff about rel and its indexes */ vacrel->rel = rel; vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes, @@ -797,6 +1034,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs); vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel); vacrel->vistest = GlobalVisTestFor(rel); + vacrel->wraparound_failsafe_count = 0; /* Initialize state used to track oldest extant XID/MXID */ vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin; @@ -959,6 +1197,9 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, vacrel->NewRelfrozenXid, 
vacrel->NewRelminMxid, &frozenxid_updated, &minmulti_updated, false); + /* Make generic extended vacuum stats report */ + /* extvac_stats_end(rel, &extVacCounters, &extVacReport.common); */ + /* * Report results to the cumulative stats system, too. * @@ -969,11 +1210,19 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, * soon in cases where the failsafe prevented significant amounts of heap * vacuuming. */ + + /* + * Make generic extended vacuum stats report and fill heap-specific + * extended stats fields. + */ + extvac_stats_end(vacrel->rel, &extVacCounters, &extVacReport.common); + accumulate_heap_vacuum_statistics(vacrel, &extVacReport); + pgstat_report_vacuum_extstats(vacrel->reloid, rel->rd_rel->relisshared, &extVacReport); pgstat_report_vacuum(rel, Max(vacrel->new_live_tuples, 0), vacrel->recently_dead_tuples + vacrel->missed_dead_tuples, - starttime); + extVacCounters.starttime); pgstat_progress_end_command(); if (instrument) @@ -981,7 +1230,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, TimestampTz endtime = GetCurrentTimestamp(); if (verbose || params.log_vacuum_min_duration == 0 || - TimestampDifferenceExceeds(starttime, endtime, + TimestampDifferenceExceeds(extVacCounters.starttime, endtime, params.log_vacuum_min_duration)) { long secs_dur; @@ -997,7 +1246,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, int64 total_blks_read; int64 total_blks_dirtied; - TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur); + TimestampDifference(extVacCounters.starttime, endtime, &secs_dur, &usecs_dur); memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &startwalusage); memset(&bufferusage, 0, sizeof(BufferUsage)); @@ -1940,6 +2189,7 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, /* Count the newly all-frozen pages for logging */ vacrel->vm_new_visible_pages++; + vacrel->vm_new_frozen_pages++; vacrel->vm_new_visible_frozen_pages++; } @@ -2662,10 +2912,20 @@ 
lazy_vacuum_all_indexes(LVRelState *vacrel) } else { + LVExtStatCounters counters; + PgStat_VacuumRelationCounts extVacReport; + + memset(&extVacReport.common, 0, sizeof(PgStat_CommonCounts)); + + extvac_stats_start(vacrel->rel, &counters); + /* Outsource everything to parallel variant */ parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples, vacrel->num_index_scans); + extvac_stats_end(vacrel->rel, &counters, &extVacReport.common); + accumulate_idxs_vacuum_statistics(vacrel, &extVacReport); + /* * Do a postcheck to consider applying wraparound failsafe now. Note * that parallel VACUUM only gets the precheck and this postcheck. @@ -3012,6 +3272,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel) int64 progress_val[3] = {0, 0, PROGRESS_VACUUM_MODE_FAILSAFE}; VacuumFailsafeActive = true; + vacrel->wraparound_failsafe_count++; /* * Abandon use of a buffer access strategy to allow use of all of @@ -3094,10 +3355,20 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) } else { + LVExtStatCounters counters; + PgStat_VacuumRelationCounts extVacReport; + + memset(&extVacReport.common, 0, sizeof(PgStat_CommonCounts)); + + extvac_stats_start(vacrel->rel, &counters); + /* Outsource everything to parallel variant */ parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples, vacrel->num_index_scans, estimated_count); + + extvac_stats_end(vacrel->rel, &counters, &extVacReport.common); + accumulate_idxs_vacuum_statistics(vacrel, &extVacReport); } /* Reset the progress counters */ @@ -3123,6 +3394,14 @@ lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, { IndexVacuumInfo ivinfo; LVSavedErrInfo saved_err_info; + LVExtStatCountersIdx extVacCounters; + PgStat_VacuumRelationCounts extVacReport; + + memset(&extVacReport, 0, sizeof(PgStat_VacuumRelationCounts)); + memset(&extVacReport.common, 0, sizeof(PgStat_CommonCounts)); + + /* Set initial statistics values to gather vacuum statistics for the index */ + extvac_stats_start_idx(indrel, istat, 
&extVacCounters); ivinfo.index = indrel; ivinfo.heaprel = vacrel->rel; @@ -3141,6 +3420,7 @@ lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, */ Assert(vacrel->indname == NULL); vacrel->indname = pstrdup(RelationGetRelationName(indrel)); + vacrel->indoid = RelationGetRelid(indrel); update_vacuum_error_info(vacrel, &saved_err_info, VACUUM_ERRCB_PHASE_VACUUM_INDEX, InvalidBlockNumber, InvalidOffsetNumber); @@ -3149,6 +3429,14 @@ lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, istat = vac_bulkdel_one_index(&ivinfo, istat, vacrel->dead_items, vacrel->dead_items_info); + /* Make extended vacuum stats report for index */ + extvac_stats_end_idx(indrel, istat, &extVacCounters, &extVacReport); + + if (!ParallelVacuumIsActive(vacrel)) + accumulate_idxs_vacuum_statistics(vacrel, &extVacReport); + + pgstat_report_vacuum_extstats(vacrel->indoid, indrel->rd_rel->relisshared, &extVacReport); + /* Revert to the previous phase information for error traceback */ restore_vacuum_error_info(vacrel, &saved_err_info); pfree(vacrel->indname); @@ -3173,6 +3461,11 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, { IndexVacuumInfo ivinfo; LVSavedErrInfo saved_err_info; + LVExtStatCountersIdx extVacCounters; + PgStat_VacuumRelationCounts extVacReport; + + /* Set initial statistics values to gather vacuum statistics for the index */ + extvac_stats_start_idx(indrel, istat, &extVacCounters); ivinfo.index = indrel; ivinfo.heaprel = vacrel->rel; @@ -3192,12 +3485,21 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, */ Assert(vacrel->indname == NULL); vacrel->indname = pstrdup(RelationGetRelationName(indrel)); + vacrel->indoid = RelationGetRelid(indrel); update_vacuum_error_info(vacrel, &saved_err_info, VACUUM_ERRCB_PHASE_INDEX_CLEANUP, InvalidBlockNumber, InvalidOffsetNumber); istat = vac_cleanup_one_index(&ivinfo, istat); + /* Make extended vacuum stats report for index */ + extvac_stats_end_idx(indrel, istat, 
&extVacCounters, &extVacReport); + + if (!ParallelVacuumIsActive(vacrel)) + accumulate_idxs_vacuum_statistics(vacrel, &extVacReport); + + pgstat_report_vacuum_extstats(RelationGetRelid(indrel), indrel->rd_rel->relisshared, &extVacReport); + /* Revert to the previous phase information for error traceback */ restore_vacuum_error_info(vacrel, &saved_err_info); pfree(vacrel->indname); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 606434823cf..024946766d8 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1883,6 +1883,7 @@ heap_drop_with_catalog(Oid relid) /* ensure that stats are dropped if transaction commits */ pgstat_drop_relation(rel); + pgstat_vacuum_relation_delete_pending_cb(RelationGetRelid(rel)); /* * Close relcache entry, but *keep* AccessExclusiveLock on the relation diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 43de42ce39e..f006287cbf5 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -2325,6 +2325,7 @@ index_drop(Oid indexId, bool concurrent, bool concurrent_lock_mode) /* ensure that stats are dropped if transaction commits */ pgstat_drop_relation(userIndexRelation); + pgstat_vacuum_relation_delete_pending_cb(RelationGetRelid(userIndexRelation)); /* * Close and flush the index's relcache entry, to ensure relcache doesn't diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index fa102f9c270..c9d19c2fb18 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -1454,3 +1454,112 @@ REVOKE ALL ON pg_aios FROM PUBLIC; GRANT SELECT ON pg_aios TO pg_read_all_stats; REVOKE EXECUTE ON FUNCTION pg_get_aios() FROM PUBLIC; GRANT EXECUTE ON FUNCTION pg_get_aios() TO pg_read_all_stats; +-- +-- Show extended cumulative statistics on a vacuum operation over all tables and +-- databases of the instance. 
+-- Use Invalid Oid "0" as an input relation id to get stat on each table in a +-- database. +-- + +CREATE VIEW pg_stat_vacuum_tables AS + SELECT + N.nspname AS schemaname, + C.relname AS relname, + S.relid as relid, + + S.total_blks_read AS total_blks_read, + S.total_blks_hit AS total_blks_hit, + S.total_blks_dirtied AS total_blks_dirtied, + S.total_blks_written AS total_blks_written, + + S.rel_blks_read AS rel_blks_read, + S.rel_blks_hit AS rel_blks_hit, + + S.pages_scanned AS pages_scanned, + S.pages_removed AS pages_removed, + S.vm_new_frozen_pages AS vm_new_frozen_pages, + S.vm_new_visible_pages AS vm_new_visible_pages, + S.vm_new_visible_frozen_pages AS vm_new_visible_frozen_pages, + S.missed_dead_pages AS missed_dead_pages, + S.tuples_deleted AS tuples_deleted, + S.tuples_frozen AS tuples_frozen, + S.recently_dead_tuples AS recently_dead_tuples, + S.missed_dead_tuples AS missed_dead_tuples, + + S.wraparound_failsafe AS wraparound_failsafe, + S.index_vacuum_count AS index_vacuum_count, + S.wal_records AS wal_records, + S.wal_fpi AS wal_fpi, + S.wal_bytes AS wal_bytes, + + S.blk_read_time AS blk_read_time, + S.blk_write_time AS blk_write_time, + + S.delay_time AS delay_time, + S.total_time AS total_time + + FROM pg_class C JOIN + pg_namespace N ON N.oid = C.relnamespace, + LATERAL pg_stat_get_vacuum_tables(C.oid) S + WHERE C.relkind IN ('r', 't', 'm'); + +CREATE VIEW pg_stat_vacuum_indexes AS + SELECT + C.oid AS relid, + I.oid AS indexrelid, + N.nspname AS schemaname, + C.relname AS relname, + I.relname AS indexrelname, + + S.total_blks_read AS total_blks_read, + S.total_blks_hit AS total_blks_hit, + S.total_blks_dirtied AS total_blks_dirtied, + S.total_blks_written AS total_blks_written, + + S.rel_blks_read AS rel_blks_read, + S.rel_blks_hit AS rel_blks_hit, + + S.pages_deleted AS pages_deleted, + S.tuples_deleted AS tuples_deleted, + + S.wal_records AS wal_records, + S.wal_fpi AS wal_fpi, + S.wal_bytes AS wal_bytes, + + S.blk_read_time AS blk_read_time, + 
S.blk_write_time AS blk_write_time, + + S.delay_time AS delay_time, + S.total_time AS total_time + FROM + pg_class C JOIN + pg_index X ON C.oid = X.indrelid JOIN + pg_class I ON I.oid = X.indexrelid + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace), + LATERAL pg_stat_get_vacuum_indexes(I.oid) S + WHERE C.relkind IN ('r', 't', 'm'); + +CREATE VIEW pg_stat_vacuum_database AS + SELECT + D.oid as dboid, + D.datname AS dbname, + + S.db_blks_read AS db_blks_read, + S.db_blks_hit AS db_blks_hit, + S.total_blks_dirtied AS total_blks_dirtied, + S.total_blks_written AS total_blks_written, + + S.wal_records AS wal_records, + S.wal_fpi AS wal_fpi, + S.wal_bytes AS wal_bytes, + + S.blk_read_time AS blk_read_time, + S.blk_write_time AS blk_write_time, + + S.delay_time AS delay_time, + S.total_time AS total_time, + S.wraparound_failsafe AS wraparound_failsafe, + S.errors AS errors + FROM + pg_database D, + LATERAL pg_stat_get_vacuum_database(D.oid) S; diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 87949054f26..9f10710636b 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -1815,6 +1815,7 @@ dropdb(const char *dbname, bool missing_ok, bool force) * Tell the cumulative stats system to forget it immediately, too. */ pgstat_drop_database(db_id); + pgstat_drop_vacuum_database(db_id); /* * Except for the deletion of the catalog row, subsequent actions are not diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 03932f45c8a..769d0ba543f 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -117,6 +117,9 @@ pg_atomic_uint32 *VacuumSharedCostBalance = NULL; pg_atomic_uint32 *VacuumActiveNWorkers = NULL; int VacuumCostBalanceLocal = 0; +/* Cumulative storage to report total vacuum delay time. */ +double VacuumDelayTime = 0; /* msec. 
*/ + /* non-export function prototypes */ static List *expand_vacuum_rel(VacuumRelation *vrel, MemoryContext vac_context, int options); @@ -2536,6 +2539,7 @@ vacuum_delay_point(bool is_analyze) exit(1); VacuumCostBalance = 0; + VacuumDelayTime += msec; /* * Balance and update limit values for autovacuum workers. We must do diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index c3b3c9ea21a..16d215150cc 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -868,6 +868,8 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, IndexBulkDeleteResult *istat = NULL; IndexBulkDeleteResult *istat_res; IndexVacuumInfo ivinfo; + LVExtStatCountersIdx extVacCounters; + PgStat_VacuumRelationCounts extVacReport; /* * Update the pointer to the corresponding bulk-deletion result if someone @@ -876,6 +878,9 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, if (indstats->istat_updated) istat = &(indstats->istat); + /* Set initial statistics values to gather vacuum statistics for the index */ + extvac_stats_start_idx(indrel, &(indstats->istat), &extVacCounters); + ivinfo.index = indrel; ivinfo.heaprel = pvs->heaprel; ivinfo.analyze_only = false; @@ -904,6 +909,10 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, RelationGetRelationName(indrel)); } + /* Make extended vacuum stats report for index */ + extvac_stats_end_idx(indrel, istat_res, &extVacCounters, &extVacReport); + pgstat_report_vacuum_extstats(RelationGetRelid(indrel), indrel->rd_rel->relisshared, &extVacReport); + /* * Copy the index bulk-deletion result returned from ambulkdelete and * amvacuumcleanup to the DSM segment if it's the first cycle because they @@ -1054,6 +1063,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) /* Set cost-based vacuum delay */ VacuumUpdateCosts(); VacuumCostBalance = 0; + VacuumDelayTime = 0; VacuumCostBalanceLocal = 0; 
VacuumSharedCostBalance = &(shared->cost_balance); VacuumActiveNWorkers = &(shared->active_nworkers); diff --git a/src/backend/utils/activity/Makefile b/src/backend/utils/activity/Makefile index 0eb29ee78aa..df012ae3db5 100644 --- a/src/backend/utils/activity/Makefile +++ b/src/backend/utils/activity/Makefile @@ -27,6 +27,7 @@ OBJS = \ pgstat_function.o \ pgstat_io.o \ pgstat_relation.o \ + pgstat_vacuum.o \ pgstat_replslot.o \ pgstat_shmem.o \ pgstat_slru.o \ diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index 11bb71cad5a..a854a64f135 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -203,7 +203,7 @@ static inline bool pgstat_is_kind_valid(PgStat_Kind kind); bool pgstat_track_counts = false; int pgstat_fetch_consistency = PGSTAT_FETCH_CONSISTENCY_CACHE; - +bool pgstat_track_vacuum_statistics = false; /* ---------- * state shared with pgstat_*.c @@ -482,6 +482,34 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .reset_all_cb = pgstat_wal_reset_all_cb, .snapshot_cb = pgstat_wal_snapshot_cb, }, + [PGSTAT_KIND_VACUUM_DB] = { + .name = "vacuum statistics", + + .fixed_amount = false, + .write_to_file = true, + /* so pg_stat_database entries can be seen in all databases */ + .accessed_across_databases = true, + + .shared_size = sizeof(PgStatShared_VacuumDB), + .shared_data_off = offsetof(PgStatShared_VacuumDB, stats), + .shared_data_len = sizeof(((PgStatShared_VacuumDB *) 0)->stats), + .pending_size = sizeof(PgStat_VacuumDBCounts), + + .flush_pending_cb = pgstat_vacuum_db_flush_cb, + }, + [PGSTAT_KIND_VACUUM_RELATION] = { + .name = "vacuum statistics", + + .fixed_amount = false, + .write_to_file = true, + + .shared_size = sizeof(PgStatShared_VacuumRelation), + .shared_data_off = offsetof(PgStatShared_VacuumRelation, stats), + .shared_data_len = sizeof(((PgStatShared_VacuumRelation *) 0)->stats), + .pending_size = sizeof(PgStat_RelationVacuumPending), + 
+ .flush_pending_cb = pgstat_vacuum_relation_flush_cb + }, }; /* diff --git a/src/backend/utils/activity/pgstat_database.c b/src/backend/utils/activity/pgstat_database.c index d7f6d4c5ee6..079f02b3f03 100644 --- a/src/backend/utils/activity/pgstat_database.c +++ b/src/backend/utils/activity/pgstat_database.c @@ -46,6 +46,15 @@ pgstat_drop_database(Oid databaseid) pgstat_drop_transactional(PGSTAT_KIND_DATABASE, databaseid, InvalidOid); } +/* + * Remove entry for the database being dropped. + */ +void +pgstat_drop_vacuum_database(Oid databaseid) +{ + pgstat_drop_transactional(PGSTAT_KIND_VACUUM_DB, databaseid, InvalidOid); +} + /* * Called from autovacuum.c to report startup of an autovacuum process. * We are called before InitPostgres is done, so can't rely on MyDatabaseId; diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c index 885d590d2b2..9516552a43e 100644 --- a/src/backend/utils/activity/pgstat_relation.c +++ b/src/backend/utils/activity/pgstat_relation.c @@ -902,6 +902,12 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait) return true; } +void +pgstat_vacuum_relation_delete_pending_cb(Oid relid) +{ + pgstat_drop_transactional(PGSTAT_KIND_VACUUM_RELATION, relid, InvalidOid); +} + void pgstat_relation_delete_pending_cb(PgStat_EntryRef *entry_ref) { diff --git a/src/backend/utils/activity/pgstat_vacuum.c b/src/backend/utils/activity/pgstat_vacuum.c new file mode 100644 index 00000000000..d3426e617a8 --- /dev/null +++ b/src/backend/utils/activity/pgstat_vacuum.c @@ -0,0 +1,217 @@ +#include "postgres.h" + +#include "pgstat.h" +#include "utils/pgstat_internal.h" +#include "utils/memutils.h" + +/* ---------- + * GUC parameters + * ---------- + */ +bool pgstat_track_vacuum_statistics_for_relations = false; + +#define ACCUMULATE_FIELD(field) dst->field += src->field; + +#define ACCUMULATE_SUBFIELD(substruct, field) \ + (dst->substruct.field += src->substruct.field) + +static void 
+pgstat_accumulate_common(PgStat_CommonCounts * dst, const PgStat_CommonCounts * src) +{ + ACCUMULATE_FIELD(total_blks_read); + ACCUMULATE_FIELD(total_blks_hit); + ACCUMULATE_FIELD(total_blks_dirtied); + ACCUMULATE_FIELD(total_blks_written); + + ACCUMULATE_FIELD(blks_fetched); + ACCUMULATE_FIELD(blks_hit); + + ACCUMULATE_FIELD(wal_records); + ACCUMULATE_FIELD(wal_fpi); + ACCUMULATE_FIELD(wal_bytes); + + ACCUMULATE_FIELD(blk_read_time); + ACCUMULATE_FIELD(blk_write_time); + ACCUMULATE_FIELD(delay_time); + ACCUMULATE_FIELD(total_time); + + ACCUMULATE_FIELD(tuples_deleted); + ACCUMULATE_FIELD(wraparound_failsafe_count); +} + +static void +pgstat_accumulate_extvac_stats_relations(PgStat_VacuumRelationCounts * dst, PgStat_VacuumRelationCounts * src) +{ + if (!pgstat_track_vacuum_statistics) + return; + + if (dst->type == PGSTAT_EXTVAC_INVALID) + dst->type = src->type; + + Assert(src->type != PGSTAT_EXTVAC_INVALID && src->type != PGSTAT_EXTVAC_DB && src->type == dst->type); + + pgstat_accumulate_common(&dst->common, &src->common); + + ACCUMULATE_SUBFIELD(common, blks_fetched); + ACCUMULATE_SUBFIELD(common, blks_hit); + + if (dst->type == PGSTAT_EXTVAC_TABLE) + { + ACCUMULATE_SUBFIELD(common, tuples_deleted); + ACCUMULATE_SUBFIELD(table, pages_scanned); + ACCUMULATE_SUBFIELD(table, pages_removed); + ACCUMULATE_SUBFIELD(table, vm_new_frozen_pages); + ACCUMULATE_SUBFIELD(table, vm_new_visible_pages); + ACCUMULATE_SUBFIELD(table, vm_new_visible_frozen_pages); + ACCUMULATE_SUBFIELD(table, tuples_frozen); + ACCUMULATE_SUBFIELD(table, recently_dead_tuples); + ACCUMULATE_SUBFIELD(table, index_vacuum_count); + ACCUMULATE_SUBFIELD(table, missed_dead_pages); + ACCUMULATE_SUBFIELD(table, missed_dead_tuples); + } + else if (dst->type == PGSTAT_EXTVAC_INDEX) + { + ACCUMULATE_SUBFIELD(common, tuples_deleted); + ACCUMULATE_SUBFIELD(index, pages_deleted); + } +} + +static void +pgstat_accumulate_extvac_stats_db(PgStat_VacuumDBCounts * dst, PgStat_VacuumDBCounts * src) +{ + if 
(!pgstat_track_vacuum_statistics) + return; + + pgstat_accumulate_common(&dst->common, &src->common); +} + +/* + * Report that the table was just vacuumed and flush statistics. + */ +void +pgstat_report_vacuum_extstats(Oid tableoid, bool shared, + PgStat_VacuumRelationCounts * params) +{ + PgStat_EntryRef *entry_ref; + PgStatShared_VacuumRelation *shtabentry; + PgStatShared_VacuumDB *shdbentry; + Oid dboid = (shared ? InvalidOid : MyDatabaseId); + + if (!pgstat_track_vacuum_statistics) + return; + + entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_VACUUM_RELATION, + dboid, tableoid, false); + shtabentry = (PgStatShared_VacuumRelation *) entry_ref->shared_stats; + pgstat_accumulate_extvac_stats_relations(&shtabentry->stats, params); + + pgstat_unlock_entry(entry_ref); + + if (!shared) + entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_VACUUM_DB, + dboid, InvalidOid, false); + else + entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_VACUUM_DB, + MyDatabaseId, InvalidOid, false); + + shdbentry = (PgStatShared_VacuumDB *) entry_ref->shared_stats; + + pgstat_accumulate_common(&shdbentry->stats.common, ¶ms->common); + + pgstat_unlock_entry(entry_ref); +} + +/* + * Flush out pending stats for the entry + * + * If nowait is true, this function returns false if lock could not + * immediately acquired, otherwise true is returned. + */ +bool +pgstat_vacuum_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait) +{ + PgStatShared_VacuumRelation *shtabstats; + PgStat_RelationVacuumPending *pendingent; /* table entry of shared stats */ + + pendingent = (PgStat_RelationVacuumPending *) entry_ref->pending; + shtabstats = (PgStatShared_VacuumRelation *) entry_ref->shared_stats; + + /* + * Ignore entries that didn't accumulate any actual counts. 
+ */ + if (pg_memory_is_all_zeros(&pendingent, + sizeof(struct PgStat_RelationVacuumPending))) + return true; + + if (!pgstat_lock_entry(entry_ref, nowait)) + { + return false; + } + + pgstat_accumulate_extvac_stats_relations(&(shtabstats->stats), &(pendingent->counts)); + + pgstat_unlock_entry(entry_ref); + + return true; +} + +/* + * Support function for the SQL-callable pgstat* functions. Returns + * the vacuum collected statistics for one relation or NULL. + */ +PgStat_VacuumRelationCounts * +pgstat_fetch_stat_vacuum_tabentry(Oid relid, Oid dbid) +{ + return (PgStat_VacuumRelationCounts *) + pgstat_fetch_entry(PGSTAT_KIND_VACUUM_RELATION, dbid, relid); +} + +PgStat_VacuumDBCounts * +pgstat_fetch_stat_vacuum_dbentry(Oid dbid) +{ + return (PgStat_VacuumDBCounts *) + pgstat_fetch_entry(PGSTAT_KIND_VACUUM_DB, dbid, InvalidOid); +} + +bool +pgstat_vacuum_db_flush_cb(PgStat_EntryRef *entry_ref, bool nowait) +{ + PgStatShared_VacuumDB *sharedent; + PgStat_VacuumDBCounts *pendingent; + + pendingent = (PgStat_VacuumDBCounts *) entry_ref->pending; + sharedent = (PgStatShared_VacuumDB *) entry_ref->shared_stats; + + if (!pgstat_lock_entry(entry_ref, nowait)) + return false; + + /* The entry was successfully flushed, add the same to database stats */ + pgstat_accumulate_extvac_stats_db(&(sharedent->stats), pendingent); + + pgstat_unlock_entry(entry_ref); + + return true; +} + +/* + * Find or create a local PgStat_VacuumDBCounts entry for dboid. + */ +PgStat_VacuumDBCounts * +pgstat_prep_vacuum_database_pending(Oid dboid) +{ + PgStat_EntryRef *entry_ref; + + /* + * This should not report stats on database objects before having + * connected to a database. 
+ */ + Assert(!OidIsValid(dboid) || OidIsValid(MyDatabaseId)); + + entry_ref = pgstat_prep_pending_entry(PGSTAT_KIND_VACUUM_DB, dboid, InvalidOid, + NULL); + + if (entry_ref == NULL) + return NULL; + + return entry_ref->pending; +} diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 901f3dd55a1..38f16ec4c85 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -2313,3 +2313,221 @@ pg_stat_have_stats(PG_FUNCTION_ARGS) PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objid)); } + +/* + * Get the vacuum statistics for the heap tables. + */ +Datum +pg_stat_get_vacuum_tables(PG_FUNCTION_ARGS) +{ +#define PG_STAT_GET_VACUUM_TABLES_STATS_COLS 26 + + Oid relid = PG_GETARG_OID(0); + PgStat_VacuumRelationCounts *extvacuum; + PgStat_VacuumRelationCounts *pending; + TupleDesc tupdesc; + Datum values[PG_STAT_GET_VACUUM_TABLES_STATS_COLS] = {0}; + bool nulls[PG_STAT_GET_VACUUM_TABLES_STATS_COLS] = {0}; + char buf[256]; + int i = 0; + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + pending = pgstat_fetch_stat_vacuum_tabentry(relid, MyDatabaseId); + + if (!pending) + { + pending = pgstat_fetch_stat_vacuum_tabentry(relid, 0); + + if (!pending) + { + InitMaterializedSRF(fcinfo, 0); + PG_RETURN_VOID(); + } + } + + extvacuum = pending; + + i = 0; + + values[i++] = ObjectIdGetDatum(relid); + + values[i++] = Int64GetDatum(extvacuum->common.total_blks_read); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_hit); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_dirtied); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_written); + + values[i++] = Int64GetDatum(extvacuum->common.blks_fetched - + extvacuum->common.blks_hit); + values[i++] = Int64GetDatum(extvacuum->common.blks_hit); + + values[i++] = Int64GetDatum(extvacuum->table.pages_scanned); + 
values[i++] = Int64GetDatum(extvacuum->table.pages_removed); + values[i++] = Int64GetDatum(extvacuum->table.vm_new_frozen_pages); + values[i++] = Int64GetDatum(extvacuum->table.vm_new_visible_pages); + values[i++] = Int64GetDatum(extvacuum->table.vm_new_visible_frozen_pages); + values[i++] = Int64GetDatum(extvacuum->table.missed_dead_pages); + values[i++] = Int64GetDatum(extvacuum->common.tuples_deleted); + values[i++] = Int64GetDatum(extvacuum->table.tuples_frozen); + values[i++] = Int64GetDatum(extvacuum->table.recently_dead_tuples); + values[i++] = Int64GetDatum(extvacuum->table.missed_dead_tuples); + + values[i++] = Int32GetDatum(extvacuum->common.wraparound_failsafe_count); + values[i++] = Int64GetDatum(extvacuum->table.index_vacuum_count); + + values[i++] = Int64GetDatum(extvacuum->common.wal_records); + values[i++] = Int64GetDatum(extvacuum->common.wal_fpi); + + /* Convert to numeric, like pg_stat_statements */ + snprintf(buf, sizeof buf, UINT64_FORMAT, extvacuum->common.wal_bytes); + values[i++] = DirectFunctionCall3(numeric_in, + CStringGetDatum(buf), + ObjectIdGetDatum(0), + Int32GetDatum(-1)); + + values[i++] = Float8GetDatum(extvacuum->common.blk_read_time); + values[i++] = Float8GetDatum(extvacuum->common.blk_write_time); + values[i++] = Float8GetDatum(extvacuum->common.delay_time); + values[i++] = Float8GetDatum(extvacuum->common.total_time); + + Assert(i == PG_STAT_GET_VACUUM_TABLES_STATS_COLS); + + /* Returns the record as Datum */ + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} + +/* + * Get the vacuum statistics for the heap tables. 
+ */ +Datum +pg_stat_get_vacuum_indexes(PG_FUNCTION_ARGS) +{ +#define PG_STAT_GET_VACUUM_INDEX_STATS_COLS 16 + + Oid relid = PG_GETARG_OID(0); + PgStat_VacuumRelationCounts *extvacuum; + PgStat_VacuumRelationCounts *pending; + TupleDesc tupdesc; + Datum values[PG_STAT_GET_VACUUM_INDEX_STATS_COLS] = {0}; + bool nulls[PG_STAT_GET_VACUUM_INDEX_STATS_COLS] = {0}; + char buf[256]; + int i = 0; + + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + pending = pgstat_fetch_stat_vacuum_tabentry(relid, MyDatabaseId); + + if (!pending) + { + pending = pgstat_fetch_stat_vacuum_tabentry(relid, 0); + + if (!pending) + { + InitMaterializedSRF(fcinfo, 0); + PG_RETURN_VOID(); + } + } + + extvacuum = pending; + + i = 0; + + values[i++] = ObjectIdGetDatum(relid); + + values[i++] = Int64GetDatum(extvacuum->common.total_blks_read); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_hit); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_dirtied); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_written); + + values[i++] = Int64GetDatum(extvacuum->common.blks_fetched - + extvacuum->common.blks_hit); + values[i++] = Int64GetDatum(extvacuum->common.blks_hit); + + values[i++] = Int64GetDatum(extvacuum->index.pages_deleted); + values[i++] = Int64GetDatum(extvacuum->common.tuples_deleted); + + values[i++] = Int64GetDatum(extvacuum->common.wal_records); + values[i++] = Int64GetDatum(extvacuum->common.wal_fpi); + + /* Convert to numeric, like pg_stat_statements */ + snprintf(buf, sizeof buf, UINT64_FORMAT, extvacuum->common.wal_bytes); + values[i++] = DirectFunctionCall3(numeric_in, + CStringGetDatum(buf), + ObjectIdGetDatum(0), + Int32GetDatum(-1)); + + values[i++] = Float8GetDatum(extvacuum->common.blk_read_time); + values[i++] = Float8GetDatum(extvacuum->common.blk_write_time); + values[i++] = Float8GetDatum(extvacuum->common.delay_time); + values[i++] = 
Float8GetDatum(extvacuum->common.total_time); + + Assert(i == PG_STAT_GET_VACUUM_INDEX_STATS_COLS); + + /* Returns the record as Datum */ + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} + +Datum +pg_stat_get_vacuum_database(PG_FUNCTION_ARGS) +{ +#define PG_STAT_GET_VACUUM_DATABASE_STATS_COLS 14 + + Oid dbid = PG_GETARG_OID(0); + PgStat_VacuumDBCounts *extvacuum; + PgStat_VacuumDBCounts *pending; + TupleDesc tupdesc; + Datum values[PG_STAT_GET_VACUUM_DATABASE_STATS_COLS] = {0}; + bool nulls[PG_STAT_GET_VACUUM_DATABASE_STATS_COLS] = {0}; + char buf[256]; + int i = 0; + + + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + InitMaterializedSRF(fcinfo, 0); + + if (!OidIsValid(dbid)) + PG_RETURN_VOID(); + + pending = pgstat_fetch_stat_vacuum_dbentry(dbid); + + if (!pending) + PG_RETURN_VOID(); + + extvacuum = pending; + + i = 0; + + values[i++] = ObjectIdGetDatum(dbid); + + values[i++] = Int64GetDatum(extvacuum->common.total_blks_read); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_hit); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_dirtied); + values[i++] = Int64GetDatum(extvacuum->common.total_blks_written); + + values[i++] = Int64GetDatum(extvacuum->common.wal_records); + values[i++] = Int64GetDatum(extvacuum->common.wal_fpi); + + /* Convert to numeric, like pg_stat_statements */ + snprintf(buf, sizeof buf, UINT64_FORMAT, extvacuum->common.wal_bytes); + values[i++] = DirectFunctionCall3(numeric_in, + CStringGetDatum(buf), + ObjectIdGetDatum(0), + Int32GetDatum(-1)); + + values[i++] = Float8GetDatum(extvacuum->common.blk_read_time); + values[i++] = Float8GetDatum(extvacuum->common.blk_write_time); + values[i++] = Float8GetDatum(extvacuum->common.delay_time); + values[i++] = Float8GetDatum(extvacuum->common.total_time); + values[i++] = Int32GetDatum(extvacuum->common.wraparound_failsafe_count); + values[i++] = 
Int32GetDatum(extvacuum->errors); + + Assert(i == PG_STAT_GET_VACUUM_DATABASE_STATS_COLS); + + /* Returns the record as Datum */ + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat index f0260e6e412..dd4c4522355 100644 --- a/src/backend/utils/misc/guc_parameters.dat +++ b/src/backend/utils/misc/guc_parameters.dat @@ -3091,6 +3091,12 @@ boot_val => 'false', }, +{ name => 'track_vacuum_statistics', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects vacuum statistics for vacuum activity.', + variable => 'pgstat_track_vacuum_statistics', + boot_val => 'false', +}, + { name => 'track_wal_io_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', short_desc => 'Collects timing statistics for WAL I/O activity.', variable => 'track_wal_io_timing', diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index c4f92fcdac8..b079ebead05 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -669,6 +669,7 @@ #track_wal_io_timing = off #track_functions = none # none, pl, all #stats_fetch_consistency = cache # cache, none, snapshot +#track_vacuum_statistics = off # - Monitoring - diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 961337ce282..93f162c2133 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -12640,6 +12640,15 @@ proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}', prosrc => 'pg_get_aios' }, +{ oid => '8001', + descr => 'pg_stat_get_vacuum_tables returns vacuum stats values for table', + proname => 'pg_stat_get_vacuum_tables', prorows => 1000, provolatile => 's', prorettype => 'record',proisstrict => 
'f', + proretset => 't', + proargtypes => 'oid', + proallargtypes => '{oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,int4,int8,int8,int8,numeric,float8,float8,float8,float8}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{reloid,relid,total_blks_read,total_blks_hit,total_blks_dirtied,total_blks_written,rel_blks_read,rel_blks_hit,pages_scanned,pages_removed,vm_new_frozen_pages,vm_new_visible_pages,vm_new_visible_frozen_pages,missed_dead_pages,tuples_deleted,tuples_frozen,recently_dead_tuples,missed_dead_tuples,wraparound_failsafe,index_vacuum_count,wal_records,wal_fpi,wal_bytes,blk_read_time,blk_write_time,delay_time,total_time}', + prosrc => 'pg_stat_get_vacuum_tables' }, # oid8 related functions { oid => '8255', descr => 'convert oid to oid8', proname => 'oid8', prorettype => 'oid8', proargtypes => 'oid', @@ -12705,4 +12714,22 @@ proname => 'pg_stat_get_rev_all_frozen_pages', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_rev_all_frozen_pages' }, +{ oid => '8004', + descr => 'pg_stat_get_vacuum_indexes returns vacuum stats values for index', + proname => 'pg_stat_get_vacuum_indexes', prorows => 1000, provolatile => 's', prorettype => 'record',proisstrict => 'f', + proretset => 't', + proargtypes => 'oid', + proallargtypes => '{oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,numeric,float8,float8,float8,float8}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{reloid,relid,total_blks_read,total_blks_hit,total_blks_dirtied,total_blks_written,rel_blks_read,rel_blks_hit,pages_deleted,tuples_deleted,wal_records,wal_fpi,wal_bytes,blk_read_time,blk_write_time,delay_time,total_time}', + prosrc => 'pg_stat_get_vacuum_indexes' }, +{ oid => '8005', + descr => 'pg_stat_get_vacuum_database returns vacuum stats values for database', + proname => 'pg_stat_get_vacuum_database', prorows => 1000, 
provolatile => 's', prorettype => 'record',proisstrict => 'f', + proretset => 't', + proargtypes => 'oid', + proallargtypes => '{oid,oid,int8,int8,int8,int8,int8,int8,numeric,float8,float8,float8,float8,int4,int4}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{dbid,dboid,db_blks_read,db_blks_hit,total_blks_dirtied,total_blks_written,wal_records,wal_fpi,wal_bytes,blk_read_time,blk_write_time,delay_time,total_time,wraparound_failsafe,errors}', + prosrc => 'pg_stat_get_vacuum_database' }, ] diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index e885a4b9c77..bf4ddf74568 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -25,6 +25,7 @@ #include "storage/buf.h" #include "storage/lock.h" #include "utils/relcache.h" +#include "pgstat.h" /* * Flags for amparallelvacuumoptions to control the participation of bulkdelete @@ -300,6 +301,26 @@ typedef struct VacDeadItemsInfo int64 num_items; /* current # of entries */ } VacDeadItemsInfo; +/* + * Counters and usage data for extended stats tracking. 
+ */ +typedef struct LVExtStatCounters +{ + TimestampTz starttime; + WalUsage walusage; + BufferUsage bufusage; + double VacuumDelayTime; + PgStat_Counter blocks_fetched; + PgStat_Counter blocks_hit; +} LVExtStatCounters; + +typedef struct LVExtStatCountersIdx +{ + LVExtStatCounters common; + int64 pages_deleted; + int64 tuples_removed; +} LVExtStatCountersIdx; + /* GUC parameters */ extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */ extern PGDLLIMPORT int vacuum_freeze_min_age; @@ -332,6 +353,7 @@ extern PGDLLIMPORT double vacuum_max_eager_freeze_failure_rate; extern PGDLLIMPORT pg_atomic_uint32 *VacuumSharedCostBalance; extern PGDLLIMPORT pg_atomic_uint32 *VacuumActiveNWorkers; extern PGDLLIMPORT int VacuumCostBalanceLocal; +extern PGDLLIMPORT double VacuumDelayTime; extern PGDLLIMPORT bool VacuumFailsafeActive; extern PGDLLIMPORT double vacuum_cost_delay; @@ -412,4 +434,8 @@ extern double anl_random_fract(void); extern double anl_init_selection_state(int n); extern double anl_get_next_S(double t, int n, double *stateptr); +extern void extvac_stats_start_idx(Relation rel, IndexBulkDeleteResult *stats, + LVExtStatCountersIdx * counters); +extern void extvac_stats_end_idx(Relation rel, IndexBulkDeleteResult *stats, + LVExtStatCountersIdx * counters, PgStat_VacuumRelationCounts * report); #endif /* VACUUM_H */ diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 072065adc90..9f1a1ee5c23 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -114,6 +114,15 @@ typedef struct PgStat_BackendSubEntry PgStat_Counter conflict_count[CONFLICT_NUM_TYPES]; } PgStat_BackendSubEntry; +/* Type of ExtVacReport */ +typedef enum ExtVacReportType +{ + PGSTAT_EXTVAC_INVALID = 0, + PGSTAT_EXTVAC_TABLE = 1, + PGSTAT_EXTVAC_INDEX = 2, + PGSTAT_EXTVAC_DB = 3, +} ExtVacReportType; + /* ---------- * PgStat_TableCounts The actual per-table counts kept by a backend * @@ -161,6 +170,112 @@ typedef struct PgStat_TableCounts PgStat_Counter 
rev_all_frozen_pages; } PgStat_TableCounts; +typedef struct PgStat_CommonCounts +{ + /* blocks */ + int64 total_blks_read; + int64 total_blks_hit; + int64 total_blks_dirtied; + int64 total_blks_written; + + /* heap blocks */ + int64 blks_fetched; + int64 blks_hit; + + /* WAL */ + int64 wal_records; + int64 wal_fpi; + uint64 wal_bytes; + + /* Time */ + double blk_read_time; + double blk_write_time; + double delay_time; + double total_time; + + /* tuples */ + int64 tuples_deleted; + + /* failsafe */ + int32 wraparound_failsafe_count; +} PgStat_CommonCounts; + +/* ---------- + * + * PgStat_VacuumRelationCounts + * + * Additional statistics of vacuum processing over a relation. + * pages_removed is the amount by which the relation physically shrank, + * if any (ie the change in its total size on disk) + * pages_deleted refers to free space within the index file + * ---------- + */ +typedef struct PgStat_VacuumRelationCounts +{ + PgStat_CommonCounts common; + + ExtVacReportType type; /* heap, index, etc. */ + + /* ---------- + * + * There are separate metrics of statistics for tables and indexes, + * which are collected during vacuum. + * The union operator allows combining these statistics + * so that each metric is assigned to a specific class of collected statistics. + * Such a combined structure was called per_type_stats. + * The name of the structure itself is not used anywhere, + * it exists only for understanding the code. 
+ * ---------- + */ + union + { + struct + { + int64 tuples_frozen; /* tuples frozen up by vacuum */ + int64 recently_dead_tuples; /* deleted tuples that are + * still visible to some + * transaction */ + int64 missed_dead_tuples; /* tuples not pruned by vacuum due + * to failure to get a cleanup + * lock */ + int64 pages_scanned; /* heap pages examined (not skipped by + * VM) */ + int64 pages_removed; /* heap pages removed by vacuum + * "truncation" */ + int64 pages_frozen; /* pages marked in VM as frozen */ + int64 pages_all_visible; /* pages marked in VM as + * all-visible */ + int64 vm_new_frozen_pages; /* pages marked in VM as + * frozen */ + int64 vm_new_visible_pages; /* pages marked in VM as + * all-visible */ + int64 vm_new_visible_frozen_pages; /* pages marked in VM as + * all-visible and + * frozen */ + int64 missed_dead_pages; /* pages with missed dead tuples */ + int64 index_vacuum_count; /* number of index vacuumings */ + } table; + struct + { + int64 pages_deleted; /* number of pages deleted by vacuum */ + } index; + } /* per_type_stats */ ; +} PgStat_VacuumRelationCounts; + +typedef struct PgStat_VacuumRelationStatus +{ + Oid id; /* table's OID */ + bool shared; /* is it a shared catalog? 
*/ + PgStat_VacuumRelationCounts counts; /* event counts to be sent */ +} PgStat_VacuumRelationStatus; + +typedef struct PgStat_VacuumDBCounts +{ + Oid dbjid; + PgStat_CommonCounts common; + int32 errors; +} PgStat_VacuumDBCounts; + /* ---------- * PgStat_TableStatus Per-table status within a backend * @@ -185,6 +300,12 @@ typedef struct PgStat_TableStatus Relation relation; /* rel that is using this entry */ } PgStat_TableStatus; +typedef struct PgStat_RelationVacuumPending +{ + Oid id; /* table's OID */ + PgStat_VacuumRelationCounts counts; /* event counts to be sent */ +} PgStat_RelationVacuumPending; + /* ---------- * PgStat_TableXactStatus Per-table, per-subtransaction status * ---------- @@ -812,6 +933,16 @@ extern int pgstat_get_transactional_drops(bool isCommit, struct xl_xact_stats_it extern void pgstat_execute_transactional_drops(int ndrops, struct xl_xact_stats_item *items, bool is_redo); +extern void pgstat_drop_vacuum_database(Oid databaseid); +extern void pgstat_vacuum_relation_delete_pending_cb(Oid relid); +extern void + pgstat_report_vacuum_extstats(Oid tableoid, bool shared, + PgStat_VacuumRelationCounts * params); +extern PgStat_RelationVacuumPending * find_vacuum_relation_entry(Oid relid); +extern PgStat_VacuumDBCounts * pgstat_prep_vacuum_database_pending(Oid dboid); +extern PgStat_VacuumRelationCounts * pgstat_fetch_stat_vacuum_tabentry(Oid relid, Oid dbid); +PgStat_VacuumDBCounts *pgstat_fetch_stat_vacuum_dbentry(Oid dbid); + /* * Functions in pgstat_wal.c */ @@ -828,7 +959,8 @@ extern PgStat_WalStats *pgstat_fetch_stat_wal(void); extern PGDLLIMPORT bool pgstat_track_counts; extern PGDLLIMPORT int pgstat_track_functions; extern PGDLLIMPORT int pgstat_fetch_consistency; - +extern PGDLLIMPORT bool pgstat_track_vacuum_statistics; +extern PGDLLIMPORT bool pgstat_track_vacuum_statistics_for_relations; /* * Variables in pgstat_bgwriter.c diff --git a/src/include/utils/pgstat_internal.h b/src/include/utils/pgstat_internal.h index 
9b8fbae00ed..3ffb8395396 100644 --- a/src/include/utils/pgstat_internal.h +++ b/src/include/utils/pgstat_internal.h @@ -500,6 +500,18 @@ typedef struct PgStatShared_Relation PgStat_StatTabEntry stats; } PgStatShared_Relation; +typedef struct PgStatShared_VacuumDB +{ + PgStatShared_Common header; + PgStat_VacuumDBCounts stats; +} PgStatShared_VacuumDB; + +typedef struct PgStatShared_VacuumRelation +{ + PgStatShared_Common header; + PgStat_VacuumRelationCounts stats; +} PgStatShared_VacuumRelation; + typedef struct PgStatShared_Function { PgStatShared_Common header; @@ -678,6 +690,9 @@ extern PgStat_EntryRef *pgstat_fetch_pending_entry(PgStat_Kind kind, extern void *pgstat_fetch_entry(PgStat_Kind kind, Oid dboid, uint64 objid); extern void pgstat_snapshot_fixed(PgStat_Kind kind); +bool pgstat_vacuum_db_flush_cb(PgStat_EntryRef *entry_ref, bool nowait); +extern bool pgstat_vacuum_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait); + /* * Functions in pgstat_archiver.c diff --git a/src/include/utils/pgstat_kind.h b/src/include/utils/pgstat_kind.h index c30b6235623..ded9767b677 100644 --- a/src/include/utils/pgstat_kind.h +++ b/src/include/utils/pgstat_kind.h @@ -38,9 +38,11 @@ #define PGSTAT_KIND_IO 10 #define PGSTAT_KIND_SLRU 11 #define PGSTAT_KIND_WAL 12 +#define PGSTAT_KIND_VACUUM_DB 13 +#define PGSTAT_KIND_VACUUM_RELATION 14 #define PGSTAT_KIND_BUILTIN_MIN PGSTAT_KIND_DATABASE -#define PGSTAT_KIND_BUILTIN_MAX PGSTAT_KIND_WAL +#define PGSTAT_KIND_BUILTIN_MAX PGSTAT_KIND_VACUUM_RELATION #define PGSTAT_KIND_BUILTIN_SIZE (PGSTAT_KIND_BUILTIN_MAX + 1) /* Custom stats kinds */ diff --git a/src/test/isolation/expected/vacuum-extending-in-repetable-read.out b/src/test/isolation/expected/vacuum-extending-in-repetable-read.out new file mode 100644 index 00000000000..6d960423912 --- /dev/null +++ b/src/test/isolation/expected/vacuum-extending-in-repetable-read.out @@ -0,0 +1,53 @@ +unused step name: s2_delete +Parsed test spec with 2 sessions + +starting permutation: 
s2_insert s2_print_vacuum_stats_table s1_begin_repeatable_read s2_update s2_insert_interrupt s2_vacuum s2_print_vacuum_stats_table s1_commit s2_checkpoint s2_vacuum s2_print_vacuum_stats_table +step s2_insert: INSERT INTO test_vacuum_stat_isolation(id, ival) SELECT ival, ival%10 FROM generate_series(1,1000) As ival; +step s2_print_vacuum_stats_table: + SELECT + vt.relname, vt.tuples_deleted, vt.recently_dead_tuples, vt.missed_dead_tuples, vt.missed_dead_pages, vt.tuples_frozen + FROM pg_stat_vacuum_tables vt, pg_class c + WHERE vt.relname = 'test_vacuum_stat_isolation' AND vt.relid = c.oid; + +relname |tuples_deleted|recently_dead_tuples|missed_dead_tuples|missed_dead_pages|tuples_frozen +--------------------------+--------------+--------------------+------------------+-----------------+------------- +test_vacuum_stat_isolation| 0| 0| 0| 0| 0 +(1 row) + +step s1_begin_repeatable_read: + BEGIN transaction ISOLATION LEVEL REPEATABLE READ; + select count(ival) from test_vacuum_stat_isolation where id>900; + +count +----- + 100 +(1 row) + +step s2_update: UPDATE test_vacuum_stat_isolation SET ival = ival + 2 where id > 900; +step s2_insert_interrupt: INSERT INTO test_vacuum_stat_isolation values (1,1); +step s2_vacuum: VACUUM test_vacuum_stat_isolation; +step s2_print_vacuum_stats_table: + SELECT + vt.relname, vt.tuples_deleted, vt.recently_dead_tuples, vt.missed_dead_tuples, vt.missed_dead_pages, vt.tuples_frozen + FROM pg_stat_vacuum_tables vt, pg_class c + WHERE vt.relname = 'test_vacuum_stat_isolation' AND vt.relid = c.oid; + +relname |tuples_deleted|recently_dead_tuples|missed_dead_tuples|missed_dead_pages|tuples_frozen +--------------------------+--------------+--------------------+------------------+-----------------+------------- +test_vacuum_stat_isolation| 0| 600| 0| 0| 0 +(1 row) + +step s1_commit: COMMIT; +step s2_checkpoint: CHECKPOINT; +step s2_vacuum: VACUUM test_vacuum_stat_isolation; +step s2_print_vacuum_stats_table: + SELECT + vt.relname, 
vt.tuples_deleted, vt.recently_dead_tuples, vt.missed_dead_tuples, vt.missed_dead_pages, vt.tuples_frozen + FROM pg_stat_vacuum_tables vt, pg_class c + WHERE vt.relname = 'test_vacuum_stat_isolation' AND vt.relid = c.oid; + +relname |tuples_deleted|recently_dead_tuples|missed_dead_tuples|missed_dead_pages|tuples_frozen +--------------------------+--------------+--------------------+------------------+-----------------+------------- +test_vacuum_stat_isolation| 300| 600| 0| 0| 303 +(1 row) + diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule index 6a4d3532e03..6c3bce90d6d 100644 --- a/src/test/isolation/isolation_schedule +++ b/src/test/isolation/isolation_schedule @@ -100,6 +100,7 @@ test: timeouts test: vacuum-concurrent-drop test: vacuum-conflict test: vacuum-skip-locked +test: vacuum-extending-in-repetable-read test: stats test: horizons test: predicate-hash diff --git a/src/test/isolation/specs/vacuum-extending-in-repetable-read.spec b/src/test/isolation/specs/vacuum-extending-in-repetable-read.spec new file mode 100644 index 00000000000..cfec3159580 --- /dev/null +++ b/src/test/isolation/specs/vacuum-extending-in-repetable-read.spec @@ -0,0 +1,59 @@ +# Test for checking recently_dead_tuples, tuples_deleted and frozen tuples in pg_stat_vacuum_tables. +# recently_dead_tuples values are counted when vacuum hasn't cleared tuples because they were deleted recently. +# recently_dead_tuples aren't increased after releasing lock compared with tuples_deleted, which increased +# by the value of the cleared tuples that the vacuum managed to clear. 
+ +setup +{ + CREATE TABLE test_vacuum_stat_isolation(id int, ival int) WITH (autovacuum_enabled = off); + SET track_io_timing = on; + SET track_vacuum_statistics TO 'on'; +} + +teardown +{ + DROP TABLE test_vacuum_stat_isolation CASCADE; + RESET track_io_timing; + RESET track_vacuum_statistics; +} + +session s1 +setup { + SET track_vacuum_statistics TO 'on'; + } +step s1_begin_repeatable_read { + BEGIN transaction ISOLATION LEVEL REPEATABLE READ; + select count(ival) from test_vacuum_stat_isolation where id>900; + } +step s1_commit { COMMIT; } + +session s2 +setup { + SET track_vacuum_statistics TO 'on'; + } +step s2_insert { INSERT INTO test_vacuum_stat_isolation(id, ival) SELECT ival, ival%10 FROM generate_series(1,1000) As ival; } +step s2_update { UPDATE test_vacuum_stat_isolation SET ival = ival + 2 where id > 900; } +step s2_delete { DELETE FROM test_vacuum_stat_isolation where id > 900; } +step s2_insert_interrupt { INSERT INTO test_vacuum_stat_isolation values (1,1); } +step s2_vacuum { VACUUM test_vacuum_stat_isolation; } +step s2_checkpoint { CHECKPOINT; } +step s2_print_vacuum_stats_table +{ + SELECT + vt.relname, vt.tuples_deleted, vt.recently_dead_tuples, vt.missed_dead_tuples, vt.missed_dead_pages, vt.tuples_frozen + FROM pg_stat_vacuum_tables vt, pg_class c + WHERE vt.relname = 'test_vacuum_stat_isolation' AND vt.relid = c.oid; +} + +permutation + s2_insert + s2_print_vacuum_stats_table + s1_begin_repeatable_read + s2_update + s2_insert_interrupt + s2_vacuum + s2_print_vacuum_stats_table + s1_commit + s2_checkpoint + s2_vacuum + s2_print_vacuum_stats_table \ No newline at end of file diff --git a/src/test/recovery/t/052_vacuum_extending_basic_test.pl b/src/test/recovery/t/052_vacuum_extending_basic_test.pl new file mode 100644 index 00000000000..a5a0c195e67 --- /dev/null +++ b/src/test/recovery/t/052_vacuum_extending_basic_test.pl @@ -0,0 +1,737 @@ +# Copyright (c) 2025 PostgreSQL Global Development Group +# Test cumulative vacuum stats system using 
TAP +# +# This test validates the accuracy and behavior of cumulative vacuum statistics +# across heap tables, indexes, and databases using: +# +# • pg_stat_vacuum_tables +# • pg_stat_vacuum_indexes +# • pg_stat_vacuum_database +# +# A polling helper function repeatedly checks the stats views until expected +# deltas appear or a configurable timeout expires. This guarantees that +# stats-collector propagation delays do not lead to flaky test behavior. + +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +#------------------------------------------------------------------------------ +# Test harness setup +#------------------------------------------------------------------------------ + +my $node = PostgreSQL::Test::Cluster->new('stat_vacuum'); +$node->init; + +# Configure the server logging level for the test +$node->append_conf('postgresql.conf', q{ + log_min_messages = notice + track_vacuum_statistics = on +}); + +my $stderr; +my $base_stats; +my $wals; +my $ibase_stats; +my $iwals; + +$node->start( + '>' => \$base_stats, + '2>' => \$stderr +); + +#------------------------------------------------------------------------------ +# Database creation and initialization +#------------------------------------------------------------------------------ + +$node->safe_psql('postgres', q{ + CREATE DATABASE statistic_vacuum_database_regression; +}); +# Main test database name and number of rows to insert +my $dbname = 'statistic_vacuum_database_regression'; +my $size_tab = 1000; + +# Enable required session settings and force the stats collector to flush next +$node->safe_psql($dbname, q{ + SET track_functions = 'all'; + SELECT pg_stat_force_next_flush(); +}); + +#------------------------------------------------------------------------------ +# Create test table and populate it +#------------------------------------------------------------------------------ + +$node->safe_psql( + $dbname, + "CREATE TABLE vestat (x 
int PRIMARY KEY) + WITH (autovacuum_enabled = off, fillfactor = 10); + INSERT INTO vestat SELECT x FROM generate_series(1, $size_tab) AS g(x); + ANALYZE vestat;" +); + +#------------------------------------------------------------------------------ +# Timing parameters for polling loops +#------------------------------------------------------------------------------ + +my $timeout = 30; # overall wait timeout in seconds +my $interval = 0.015; # poll interval in seconds (15 ms) +my $start_time = time(); +my $updated = 0; + +#------------------------------------------------------------------------------ +# wait_for_vacuum_stats +# +# Polls pg_stat_vacuum_tables and pg_stat_vacuum_indexes until both the +# table-level and index-level counters exceed the provided baselines, or until +# the configured timeout elapses. +# +# Expected named args (baseline values): +# tab_tuples_deleted +# tab_wal_records +# idx_tuples_deleted +# idx_wal_records +# +# Returns: 1 if the condition is met before timeout, 0 otherwise. 
+#------------------------------------------------------------------------------ + +sub wait_for_vacuum_stats { + my (%args) = @_; + my $tab_tuples_deleted = ($args{tab_tuples_deleted} or 0); + my $tab_wal_records = ($args{tab_wal_records} or 0); + my $idx_tuples_deleted = ($args{idx_tuples_deleted} or 0); + my $idx_wal_records = ($args{idx_wal_records} or 0); + + my $start = time(); + while ((time() - $start) < $timeout) { + + my $result_query = $node->safe_psql( + $dbname, + "VACUUM vestat; + SELECT + (SELECT (tuples_deleted > $tab_tuples_deleted AND wal_records > $tab_wal_records) + FROM pg_stat_vacuum_tables + WHERE relname = 'vestat') + AND + (SELECT (tuples_deleted > $idx_tuples_deleted AND wal_records > $idx_wal_records) + FROM pg_stat_vacuum_indexes + WHERE indexrelname = 'vestat_pkey');" + ); + + return 1 if ($result_query eq 't'); + + sleep($interval); + } + + return 0; +} + +#------------------------------------------------------------------------------ +# Variables to hold vacuum-stat snapshots for later comparisons +#------------------------------------------------------------------------------ + +my $vm_new_visible_frozen_pages = 0; +my $tuples_deleted = 0; +my $pages_scanned = 0; +my $pages_removed = 0; +my $wal_records = 0; +my $wal_bytes = 0; +my $wal_fpi = 0; + +my $index_tuples_deleted = 0; +my $index_pages_deleted = 0; +my $index_wal_records = 0; +my $index_wal_bytes = 0; +my $index_wal_fpi = 0; + +my $vm_new_visible_frozen_pages_prev = 0; +my $tuples_deleted_prev = 0; +my $pages_scanned_prev = 0; +my $pages_removed_prev = 0; +my $wal_records_prev = 0; +my $wal_bytes_prev = 0; +my $wal_fpi_prev = 0; + +my $index_tuples_deleted_prev = 0; +my $index_pages_deleted_prev = 0; +my $index_wal_records_prev = 0; +my $index_wal_bytes_prev = 0; +my $index_wal_fpi_prev = 0; + +#------------------------------------------------------------------------------ +# fetch_vacuum_stats +# +# Reads current values of relevant vacuum counters for the test table and its 
+# primary index, storing them in package variables for subsequent comparisons. +#------------------------------------------------------------------------------ + +sub fetch_vacuum_stats { + # fetch actual base vacuum statistics + my $base_statistics = $node->safe_psql( + $dbname, + "SELECT vm_new_visible_frozen_pages, tuples_deleted, pages_scanned, pages_removed, wal_records, wal_bytes, wal_fpi + FROM pg_stat_vacuum_tables + WHERE relname = 'vestat';" + ); + + $base_statistics =~ s/\s*\|\s*/ /g; # transform " | " into space + ($vm_new_visible_frozen_pages, $tuples_deleted, $pages_scanned, $pages_removed, $wal_records, $wal_bytes, $wal_fpi) + = split /\s+/, $base_statistics; + + # --- index stats --- + my $index_base_statistics = $node->safe_psql( + $dbname, + "SELECT tuples_deleted, pages_deleted, wal_records, wal_bytes, wal_fpi + FROM pg_stat_vacuum_indexes + WHERE indexrelname = 'vestat_pkey';" + ); + + $index_base_statistics =~ s/\s*\|\s*/ /g; # transform " | " into space + ($index_tuples_deleted, $index_pages_deleted, $index_wal_records, $index_wal_bytes, $index_wal_fpi) + = split /\s+/, $index_base_statistics; +} + +#------------------------------------------------------------------------------ +# save_vacuum_stats +# +# Save current values (previously fetched by fetch_vacuum_stats) so that we +# later fetch new values and compare them. 
+#------------------------------------------------------------------------------ +sub save_vacuum_stats { + $vm_new_visible_frozen_pages_prev = $vm_new_visible_frozen_pages; + $tuples_deleted_prev = $tuples_deleted; + $pages_scanned_prev = $pages_scanned; + $pages_removed_prev = $pages_removed; + $wal_records_prev = $wal_records; + $wal_bytes_prev = $wal_bytes; + $wal_fpi_prev = $wal_fpi; + + $index_tuples_deleted_prev = $index_tuples_deleted; + $index_pages_deleted_prev = $index_pages_deleted; + $index_wal_records_prev = $index_wal_records; + $index_wal_bytes_prev = $index_wal_bytes; + $index_wal_fpi_prev = $index_wal_fpi; +} + +#------------------------------------------------------------------------------ +# print_vacuum_stats_on_error +# +# Print values in case of an error +#------------------------------------------------------------------------------ +sub print_vacuum_stats_on_error { + diag( + "Statistics in the failed test\n" . + "Table statistics:\n" . + " Before test:\n" . + " vm_new_visible_frozen_pages = $vm_new_visible_frozen_pages_prev\n" . + " tuples_deleted = $tuples_deleted_prev\n" . + " pages_scanned = $pages_scanned_prev\n" . + " pages_removed = $pages_removed_prev\n" . + " wal_records = $wal_records_prev\n" . + " wal_bytes = $wal_bytes_prev\n" . + " wal_fpi = $wal_fpi_prev\n" . + " After test:\n" . + " vm_new_visible_frozen_pages = $vm_new_visible_frozen_pages\n" . + " tuples_deleted = $tuples_deleted\n" . + " pages_scanned = $pages_scanned\n" . + " pages_removed = $pages_removed\n" . + " wal_records = $wal_records\n" . + " wal_bytes = $wal_bytes\n" . + " wal_fpi = $wal_fpi\n" . + "Index statistics:\n" . + " Before test:\n" . + " tuples_deleted = $index_tuples_deleted_prev\n" . + " pages_deleted = $index_pages_deleted_prev\n" . + " wal_records = $index_wal_records_prev\n" . + " wal_bytes = $index_wal_bytes_prev\n" . + " wal_fpi = $index_wal_fpi_prev\n" . + " After test:\n" . + " tuples_deleted = $index_tuples_deleted\n" . 
+ " pages_deleted = $index_pages_deleted\n" . + " wal_records = $index_wal_records\n" . + " wal_bytes = $index_wal_bytes\n" . + " wal_fpi = $index_wal_fpi\n" + ); +}; + +sub fetch_error_base_db_vacuum_statistics { + my (%args) = @_; + + # Validate presence of required args (allow 0 as valid numeric baseline) + die "database name required" + unless exists $args{database_name} && defined $args{database_name}; + my $database_name = $args{database_name}; + + # fetch actual base database vacuum statistics + my $base_statistics = $node->safe_psql( + $database_name, + "SELECT db_blks_hit, total_blks_dirtied, + total_blks_written, wal_records, + wal_fpi, wal_bytes + FROM pg_stat_vacuum_database, pg_database + WHERE pg_database.datname = '$dbname' + AND pg_database.oid = pg_stat_vacuum_database.dboid;" + ); + $base_statistics =~ s/\s*\|\s*/ /g; # transform " | " in space + my ($db_blks_hit, $total_blks_dirtied, $total_blks_written, + $wal_records, $wal_fpi, $wal_bytes) = split /\s+/, $base_statistics; + + diag( + "BASE STATS MISMATCH FOR DATABASE $dbname:\n" . + " db_blks_hit = $db_blks_hit\n" . + " total_blks_dirtied = $total_blks_dirtied\n" . + " total_blks_written = $total_blks_written\n" . + " wal_records = $wal_records\n" . + " wal_fpi = $wal_fpi\n" . 
+ " wal_bytes = $wal_bytes\n" + ); +} + + +#------------------------------------------------------------------------------ +# Test 1: Delete half the rows, run VACUUM, and wait for stats to advance +#------------------------------------------------------------------------------ +subtest 'Test 1: Delete half the rows, run VACUUM' => sub +{ + +$node->safe_psql($dbname, "DELETE FROM vestat WHERE x % 2 = 0;"); +$node->safe_psql($dbname, "VACUUM vestat;"); + +# Poll the stats view until expected deltas appear or timeout +$updated = wait_for_vacuum_stats( + tab_tuples_deleted => 0, + tab_wal_records => 0, + idx_tuples_deleted => 0, + idx_wal_records => 0, +); +ok($updated, 'vacuum stats updated after vacuuming half-deleted table (tuples_deleted and wal_fpi advanced)') + or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds after vacuuming half-deleted table"; + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same'); +ok($tuples_deleted > $tuples_deleted_prev, 'table tuples_deleted has increased'); +ok($pages_scanned > $pages_scanned_prev, 'table pages_scanned has increased'); +ok($pages_removed == $pages_removed_prev, 'table pages_removed stay the same'); +ok($wal_records > $wal_records_prev, 'table wal_records has increased'); +ok($wal_bytes > $wal_bytes_prev, 'table wal_bytes has increased'); +ok($wal_fpi > $wal_fpi_prev, 'table wal_fpi has increased'); + +ok($index_pages_deleted == $index_pages_deleted_prev, 'index pages_deleted stay the same'); +ok($index_tuples_deleted > $index_tuples_deleted_prev, 'index tuples_deleted has increased'); +ok($index_wal_records > $index_wal_records_prev, 'index wal_records has increased'); +ok($index_wal_bytes > $index_wal_bytes_prev, 'index wal_bytes has increased'); +ok($index_wal_fpi == $index_wal_fpi_prev, 'index wal_fpi stay the same'); + +} or print_vacuum_stats_on_error(); + 
+#------------------------------------------------------------------------------ +# Test 2: Delete all rows, run VACUUM, and wait for stats to advance +#------------------------------------------------------------------------------ +subtest 'Test 2: Delete all rows, run VACUUM' => sub +{ +save_vacuum_stats(); + +$node->safe_psql($dbname, "DELETE FROM vestat;"); +$node->safe_psql($dbname, "VACUUM vestat;"); + +$updated = wait_for_vacuum_stats( + tab_tuples_deleted => $tuples_deleted_prev, + tab_wal_records => $wal_records_prev, + idx_tuples_deleted => $index_tuples_deleted_prev, + idx_wal_records => $index_wal_records_prev, +); + +ok($updated, 'vacuum stats updated after vacuuming all-deleted table (tuples_deleted and wal_records advanced)') + or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds after vacuuming all-deleted table"; + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages > $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages has increased'); +ok($tuples_deleted > $tuples_deleted_prev, 'table tuples_deleted has increased'); +ok($pages_scanned > $pages_scanned_prev, 'table pages_scanned has increased'); +ok($pages_removed > $pages_removed_prev, 'table pages_removed has increased'); +ok($wal_records > $wal_records_prev, 'table wal_records has increased'); +ok($wal_bytes > $wal_bytes_prev, 'table wal_bytes has increased'); +ok($wal_fpi > 0, 'table wal_fpi has increased'); + +ok($index_pages_deleted > $index_pages_deleted_prev, 'index pages_deleted has increased'); +ok($index_tuples_deleted > $index_tuples_deleted_prev, 'index tuples_deleted has increased'); +ok($index_wal_records > $index_wal_records_prev, 'index wal_records has increased'); +ok($index_wal_bytes > $index_wal_bytes_prev, 'index wal_bytes has increased'); +ok($index_wal_fpi == $index_wal_fpi_prev, 'index wal_fpi stay the same'); + +} or print_vacuum_stats_on_error(); + 
+#------------------------------------------------------------------------------ +# Test 3: Test VACUUM FULL — it should not report to the stats collector +#------------------------------------------------------------------------------ +subtest 'Test 3: Test VACUUM FULL — it should not report to the stats collector' => sub +{ +save_vacuum_stats(); + +$node->safe_psql( + $dbname, + "INSERT INTO vestat SELECT x FROM generate_series(1, $size_tab) AS g(x); + CHECKPOINT; + DELETE FROM vestat; + VACUUM FULL vestat;" +); + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same'); +ok($tuples_deleted == $tuples_deleted_prev, 'table tuples_deleted stay the same'); +ok($pages_scanned == $pages_scanned_prev, 'table pages_scanned stay the same'); +ok($pages_removed == $pages_removed_prev, 'table pages_removed stay the same'); +ok($wal_records == $wal_records_prev, 'table wal_records stay the same'); +ok($wal_bytes == $wal_bytes_prev, 'table wal_bytes stay the same'); +ok($wal_fpi == $wal_fpi_prev, 'table wal_fpi stay the same'); + +ok($index_pages_deleted == $index_pages_deleted_prev, 'index pages_deleted stay the same'); +ok($index_tuples_deleted == $index_tuples_deleted_prev, 'index tuples_deleted stay the same'); +ok($index_wal_records == $index_wal_records_prev, 'index wal_records stay the same'); +ok($index_wal_bytes == $index_wal_bytes_prev, 'index wal_bytes stay the same'); +ok($index_wal_fpi == $index_wal_fpi_prev, 'index wal_fpi stay the same'); + +} or print_vacuum_stats_on_error(); + +#------------------------------------------------------------------------------ +# Test 4: Update table, checkpoint, and VACUUM to provoke WAL/FPI accounting +#------------------------------------------------------------------------------ +subtest 'Test 4: Update table, checkpoint, and VACUUM to provoke WAL/FPI accounting' => sub +{ + +save_vacuum_stats(); + +$node->safe_psql( + $dbname, + "INSERT 
INTO vestat SELECT x FROM generate_series(1, $size_tab) AS g(x); + CHECKPOINT; + UPDATE vestat SET x = x + 1000; + VACUUM vestat;" +); + +$updated = wait_for_vacuum_stats( + tab_tuples_deleted => $tuples_deleted_prev, + tab_wal_records => $wal_records_prev, + idx_tuples_deleted => $index_tuples_deleted_prev, + idx_wal_records => $index_wal_records_prev, +); + +ok($updated, 'vacuum stats updated after updating tuples in the table (tuples_deleted and wal_records advanced)') + or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds"; + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same'); +ok($tuples_deleted > $tuples_deleted_prev, 'table tuples_deleted has increased'); +ok($pages_scanned > $pages_scanned_prev, 'table pages_scanned has increased'); +ok($pages_removed == $pages_removed_prev, 'table pages_removed stay the same'); +ok($wal_records > $wal_records_prev, 'table wal_records has increased'); +ok($wal_bytes > $wal_bytes_prev, 'table wal_bytes has increased'); +ok($wal_fpi > $wal_fpi_prev, 'table wal_fpi has increased'); + +ok($index_pages_deleted > $index_pages_deleted_prev, 'index pages_deleted has increased'); +ok($index_tuples_deleted > $index_tuples_deleted_prev, 'index tuples_deleted has increased'); +ok($index_wal_records > $index_wal_records_prev, 'index wal_records has increased'); +ok($index_wal_bytes > $index_wal_bytes_prev, 'index wal_bytes has increased'); +ok($index_wal_fpi > $index_wal_fpi_prev, 'index wal_fpi has increased'); + +} or print_vacuum_stats_on_error(); + +#------------------------------------------------------------------------------ +# Test 5: Update table, truncate and vacuuming +#------------------------------------------------------------------------------ +subtest 'Test 5: Update table, truncate and vacuuming' => sub +{ + +save_vacuum_stats(); + +$node->safe_psql( + $dbname, + "INSERT INTO vestat SELECT x FROM 
generate_series(1, $size_tab) AS g(x); + UPDATE vestat SET x = x + 1000;" +); +$node->safe_psql($dbname, "TRUNCATE vestat;"); +$node->safe_psql($dbname, "CHECKPOINT;"); +$node->safe_psql($dbname, "VACUUM vestat;"); + +$updated = wait_for_vacuum_stats( + tab_wal_records => $wal_records_prev, +); + +ok($updated, 'vacuum stats updated after updating tuples and truncation in the table (wal_records advanced)') + or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds"; + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same'); +ok($tuples_deleted == $tuples_deleted_prev, 'table tuples_deleted stay the same'); +ok($pages_scanned == $pages_scanned_prev, 'table pages_scanned stay the same'); +ok($pages_removed == $pages_removed_prev, 'table pages_removed stay the same'); +ok($wal_records > $wal_records_prev, 'table wal_records has increased'); +ok($wal_bytes > $wal_bytes_prev, 'table wal_bytes has increased'); +ok($wal_fpi == $wal_fpi_prev, 'table wal_fpi stay the same'); + +ok($index_pages_deleted == $index_pages_deleted_prev, 'index pages_deleted stay the same'); +ok($index_tuples_deleted == $index_tuples_deleted_prev, 'index tuples_deleted stay the same'); +ok($index_wal_records == $index_wal_records_prev, 'index wal_records stay the same'); +ok($index_wal_bytes == $index_wal_bytes_prev, 'index wal_bytes stay the same'); +ok($index_wal_fpi == $index_wal_fpi_prev, 'index wal_fpi stay the same'); + +} or print_vacuum_stats_on_error(); + +#------------------------------------------------------------------------------ +# Test 6: Delete all tuples from table, truncate, and vacuuming +#------------------------------------------------------------------------------ +subtest 'Test 6: Delete all tuples from table, truncate, and vacuuming' => sub +{ + +save_vacuum_stats(); + +$node->safe_psql( + $dbname, + "INSERT INTO vestat SELECT x FROM generate_series(1, $size_tab) AS 
g(x); + DELETE FROM vestat; + TRUNCATE vestat; + CHECKPOINT; + VACUUM vestat;" +); + +$updated = wait_for_vacuum_stats( + tab_wal_records => $wal_records, +); + +ok($updated, 'vacuum stats updated after deleting all tuples and truncation in the table (wal_records advanced)') + or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds"; + +fetch_vacuum_stats(); + +ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same'); +ok($tuples_deleted == $tuples_deleted_prev, 'table tuples_deleted stay the same'); +ok($pages_scanned == $pages_scanned_prev, 'table pages_scanned stay the same'); +ok($pages_removed == $pages_removed_prev, 'table pages_removed stay the same'); +ok($wal_records > $wal_records_prev, 'table wal_records has increased'); +ok($wal_bytes > $wal_bytes_prev, 'table wal_bytes has increased'); +ok($wal_fpi == $wal_fpi_prev, 'table wal_fpi stay the same'); + +ok($index_pages_deleted == $index_pages_deleted_prev, 'index pages_deleted stay the same'); +ok($index_tuples_deleted == $index_tuples_deleted_prev, 'index tuples_deleted stay the same'); +ok($index_wal_records == $index_wal_records_prev, 'index wal_records stay the same'); +ok($index_wal_bytes == $index_wal_bytes_prev, 'index wal_bytes stay the same'); +ok($index_wal_fpi == $index_wal_fpi_prev, 'index wal_fpi stay the same'); + +} or print_vacuum_stats_on_error(); + +my $dboid = $node->safe_psql( + $dbname, + "SELECT oid FROM pg_database WHERE datname = current_database();" +); + +#------------------------------------------------------------------------------------------------------- +# Test 7: Check if we return single vacuum statistics for particular relation from the current database +#------------------------------------------------------------------------------------------------------- +subtest 'Test 7: Check if we return vacuum statistics from the current database' => sub +{ +save_vacuum_stats(); + +my $reloid = 
$node->safe_psql( + $dbname, + q{ + SELECT oid FROM pg_class WHERE relname = 'vestat'; + } +); + +# Check if we can get vacuum statistics of particular heap relation in the current database +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) FROM pg_stat_get_vacuum_tables($reloid);" +); +is($base_stats, 1, 'heap vacuum stats return from the current relation and database as expected'); + +$reloid = $node->safe_psql( + $dbname, + q{ + SELECT oid FROM pg_class WHERE relname = 'vestat_pkey'; + } +); + +# Check if we can get vacuum statistics of particular index relation in the current database +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) FROM pg_stat_get_vacuum_indexes($reloid);" +); +is($base_stats, 1, 'index vacuum stats return from the current relation and database as expected'); + +# Check if we return empty results if vacuum statistics with particular oid doesn't exist +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) FROM pg_stat_get_vacuum_tables(1);" +); +is($base_stats, 0, 'table vacuum stats return no rows, as expected'); + +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) FROM pg_stat_get_vacuum_indexes(1);" +); +is($base_stats, 0, 'index vacuum stats return no rows, as expected'); + +# Check if we can get vacuum statistics of all relations in the current database +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) > 0 FROM pg_stat_vacuum_tables;" +); +ok($base_stats eq 't', 'vacuum stats per all heap objects available'); + +$base_stats = $node->safe_psql( + $dbname, + "SELECT count(*) > 0 FROM pg_stat_vacuum_indexes;" +); +ok($base_stats eq 't', 'vacuum stats per all index objects available'); +}; + +#------------------------------------------------------------------------------ +# Test 8: Check relation-level vacuum statistics from another database +#------------------------------------------------------------------------------ +subtest 'Test 8: Check relation-level vacuum statistics from 
another database' => sub +{ +$base_stats = $node->safe_psql( + 'postgres', + "SELECT count(*) + FROM pg_stat_vacuum_indexes + WHERE indexrelname = 'vestat_pkey';" +); +is($base_stats, 0, 'check the printing index vacuum extended statistics from another database are not available'); + +$base_stats = $node->safe_psql( + 'postgres', + "SELECT count(*) + FROM pg_stat_vacuum_tables + WHERE relname = 'vestat';" +); +is($base_stats, 0, 'check the printing heap vacuum extended statistics from another database are not available'); + +# Check that relations from another database are not visible in the view when querying from postgres +$base_stats = $node->safe_psql( + 'postgres', + "SELECT count(*) FROM pg_stat_vacuum_tables WHERE relname = 'vestat';" +); +is($base_stats, 0, 'vacuum stats per all tables objects from another database are not available as expected'); + +$base_stats = $node->safe_psql( + 'postgres', + "SELECT count(*) FROM pg_stat_vacuum_indexes WHERE indexrelname = 'vestat_pkey';" +); +is($base_stats, 0, 'vacuum stats per all index objects from another database are not available as expected'); +}; + +#------------------------------------------------------------------------------ +# Test 9: Cleanup checks: ensure functions return empty sets for OID = 0 +#------------------------------------------------------------------------------ +subtest 'Test 9: Cleanup checks: ensure functions return empty sets for OID = 0' => sub +{ +my $dboid = $node->safe_psql( + $dbname, + "SELECT oid FROM pg_database WHERE datname = current_database();" +); + +# Vacuum statistics for invalid relation OID return empty +$base_stats = $node->safe_psql( + $dbname, + q{ + SELECT COUNT(*) + FROM pg_stat_get_vacuum_tables(0); + } +); +is($base_stats, 0, 'vacuum stats per heap from invalid relation OID return empty as expected'); + +$base_stats = $node->safe_psql( + $dbname, + q{ + SELECT COUNT(*) + FROM pg_stat_get_vacuum_indexes(0); + } +); +is($base_stats, 0, 'vacuum stats per index from 
invalid relation OID return empty as expected'); + +$node->safe_psql($dbname, q{ + DROP TABLE vestat CASCADE; + VACUUM; +}); + +# Check that we don't print vacuum statistics for deleted objects +$base_stats = $node->safe_psql( + $dbname, + q{ + SELECT COUNT(*) + FROM pg_stat_vacuum_tables WHERE relid = 0; + } +); +is($base_stats, 0, 'pg_stat_vacuum_tables correctly returns no rows for OID = 0'); + +$base_stats = $node->safe_psql( + $dbname, + q{ + SELECT COUNT(*) + FROM pg_stat_vacuum_indexes WHERE relid = 0; + } +); +is($base_stats, 0, 'pg_stat_vacuum_indexes correctly returns no rows for OID = 0'); + +my $reloid = $node->safe_psql( + $dbname, + q{ + SELECT oid FROM pg_class WHERE relname = 'pg_shdepend'; + } +); + +$node->safe_psql($dbname, "VACUUM pg_shdepend;"); + +# Check if we can get vacuum statistics for cluster relations (shared catalogs) +$base_stats = $node->safe_psql( + $dbname, + qq{ + SELECT count(*) > 0 + FROM pg_stat_get_vacuum_tables($reloid); + } +); + +is($base_stats, 't', 'vacuum stats for common heap objects available'); + +my $indoid = $node->safe_psql( + $dbname, + q{ + SELECT oid FROM pg_class WHERE relname = 'pg_shdepend_reference_index'; + } +); + +$base_stats = $node->safe_psql( + $dbname, + qq{ + SELECT count(*) > 0 + FROM pg_stat_get_vacuum_indexes($indoid); + } +); + +is($base_stats, 't', 'vacuum stats for common index objects available'); + +$node->safe_psql('postgres', + "DROP DATABASE $dbname; + VACUUM;" +); + +$base_stats = $node->safe_psql( + 'postgres', + q{ + SELECT count(*) = 0 + FROM pg_stat_get_vacuum_database(0); + } +); +is($base_stats, 't', 'vacuum stats from database with invalid database OID return empty, as expected'); +}; + +$node->stop; + +done_testing(); diff --git a/src/test/recovery/t/053_vacuum_extending_freeze_test.pl b/src/test/recovery/t/053_vacuum_extending_freeze_test.pl new file mode 100644 index 00000000000..82089c013f4 --- /dev/null +++ b/src/test/recovery/t/053_vacuum_extending_freeze_test.pl @@ -0,0 
+1,329 @@ +# Copyright (c) 2025 PostgreSQL Global Development Group +# +# Test cumulative vacuum stats system using TAP +# +# In short, this test validates the correctness and stability of cumulative +# vacuum statistics accounting around freezing, visibility, and revision +# tracking across multiple VACUUMs and backend operations. + +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +#------------------------------------------------------------------------------ +# Test cluster setup +#------------------------------------------------------------------------------ + +my $node = PostgreSQL::Test::Cluster->new('ext_stat_vacuum'); +$node->init; + +# Configure the server for aggressive freezing behavior used by the test +$node->append_conf('postgresql.conf', q{ + log_min_messages = notice + vacuum_freeze_min_age = 0 + vacuum_freeze_table_age = 0 + vacuum_multixact_freeze_min_age = 0 + vacuum_multixact_freeze_table_age = 0 + vacuum_max_eager_freeze_failure_rate = 1.0 + vacuum_failsafe_age = 0 + vacuum_multixact_failsafe_age = 0 + track_vacuum_statistics = on + track_functions = 'all' +}); + +$node->start(); + +#------------------------------------------------------------------------------ +# Database creation and initialization +#------------------------------------------------------------------------------ + +$node->safe_psql('postgres', q{ + CREATE DATABASE statistic_vacuum_database_regression; +}); + +# Main test database name +my $dbname = 'statistic_vacuum_database_regression'; + +# Enable necessary settings and force the stats collector to flush next +$node->safe_psql($dbname, q{ + SELECT pg_stat_force_next_flush(); +}); + +#------------------------------------------------------------------------------ +# Timing parameters for polling loops +#------------------------------------------------------------------------------ + +my $timeout = 30; # overall wait timeout in seconds +my $interval = 0.015; # poll 
my $start_time = time();    # NOTE(review): currently unused by any test below
my $updated = 0;

# wait_for_vacuum_stats
#
# Polls the vacuum statistics until the tracked counters exceed the supplied
# baselines, or until $timeout elapses.
#
# Arguments (hash):
#   tab_all_frozen_pages_count  => numeric baseline for the frozen-pages check
#   tab_all_visible_pages_count => numeric baseline for the visible-pages check
#   run_vacuum                  => if true, run VACUUM (FREEZE, VERBOSE) once
#                                  before polling; polling then checks
#                                  pg_stat_vacuum_tables.vm_new_visible_frozen_pages
#                                  against the frozen baseline.  If false,
#                                  polling checks the revision counters
#                                  pg_stat_get_rev_all_frozen_pages() and
#                                  pg_stat_get_rev_all_visible_pages() against
#                                  both baselines.
#
# Other keys (tab_frozen_column, tab_visible_column, single_column) are
# accepted for caller compatibility but ignored: the polled columns are fixed
# by the run_vacuum mode above.
#
# Returns: 1 if the condition is met before timeout, 0 otherwise.
sub wait_for_vacuum_stats {
	my (%args) = @_;

	my $frozen_baseline  = $args{tab_all_frozen_pages_count};
	my $visible_baseline = $args{tab_all_visible_pages_count};
	my $run_vacuum       = $args{run_vacuum} ? 1 : 0;

	# Run VACUUM once if requested, before polling
	$node->safe_psql($dbname, 'VACUUM (FREEZE, VERBOSE) vestat')
	  if $run_vacuum;

	my $start = time();
	while ((time() - $start) < $timeout)
	{
		my $sql;

		if ($run_vacuum)
		{
			$sql = "
				SELECT (vm_new_visible_frozen_pages > $frozen_baseline)
				FROM pg_stat_vacuum_tables
				WHERE relname = 'vestat'";
		}
		else
		{
			$sql = "
				SELECT (pg_stat_get_rev_all_frozen_pages(c.oid) > $frozen_baseline AND
						pg_stat_get_rev_all_visible_pages(c.oid) > $visible_baseline)
				FROM pg_class c
				WHERE relname = 'vestat'";
		}

		my $result = $node->safe_psql($dbname, $sql);
		return 1 if (defined $result && $result eq 't');

		# Perl's builtin sleep() truncates fractional seconds, so
		# sleep(0.015) would sleep 0 seconds and busy-poll the server.
		# Use Time::HiRes for a genuine sub-second sleep.
		require Time::HiRes;
		Time::HiRes::sleep($interval);
	}

	return 0;
}

#------------------------------------------------------------------------------
# Variables to hold vacuum statistics snapshots for comparisons
#------------------------------------------------------------------------------

my $vm_new_visible_frozen_pages = 0;

my $rev_all_frozen_pages = 0;
my $rev_all_visible_pages = 0;

my $vm_new_visible_frozen_pages_prev = 0;

my $rev_all_frozen_pages_prev = 0;
my $rev_all_visible_pages_prev = 0;

my $res;

#------------------------------------------------------------------------------
# fetch_vacuum_stats
#
# Loads current values of the relevant vacuum counters for the test table
# into the package-level variables above so tests can compare later.
#------------------------------------------------------------------------------

sub fetch_vacuum_stats {
	# fetch actual base vacuum statistics
	$vm_new_visible_frozen_pages = $node->safe_psql(
		$dbname,
		"SELECT vt.vm_new_visible_frozen_pages
		 FROM pg_stat_vacuum_tables vt
		 WHERE vt.relname = 'vestat';"
	);

	$rev_all_frozen_pages = $node->safe_psql(
		$dbname,
		"SELECT pg_stat_get_rev_all_frozen_pages(c.oid)
		 FROM pg_class c
		 WHERE c.relname = 'vestat';"
	);

	$rev_all_visible_pages = $node->safe_psql(
		$dbname,
		"SELECT pg_stat_get_rev_all_visible_pages(c.oid)
		 FROM pg_class c
		 WHERE c.relname = 'vestat';"
	);
}

#------------------------------------------------------------------------------
# save_vacuum_stats
#
# Save current values (previously fetched by fetch_vacuum_stats) so that we
# later fetch new values and compare them.
#------------------------------------------------------------------------------
sub save_vacuum_stats {
	$vm_new_visible_frozen_pages_prev = $vm_new_visible_frozen_pages;
	$rev_all_frozen_pages_prev = $rev_all_frozen_pages;
	$rev_all_visible_pages_prev = $rev_all_visible_pages;
}

#------------------------------------------------------------------------------
# print_vacuum_stats_on_error
#
# Print values in case of an error
#------------------------------------------------------------------------------
sub print_vacuum_stats_on_error {
	diag(
		"Statistics in the failed test\n" .
		"Table statistics:\n" .
		" Before test:\n" .
		" vm_new_visible_frozen_pages = $vm_new_visible_frozen_pages_prev\n" .
		" rev_all_frozen_pages = $rev_all_frozen_pages_prev\n" .
		" rev_all_visible_pages = $rev_all_visible_pages_prev\n" .
		"Statistics in the failed test\n" .
		"Table statistics:\n" .
		" After test:\n" .
		" vm_new_visible_frozen_pages = $vm_new_visible_frozen_pages\n" .
		" rev_all_frozen_pages = $rev_all_frozen_pages\n" .
		" rev_all_visible_pages = $rev_all_visible_pages\n"
	);
}

#------------------------------------------------------------------------------
# Test 1: Create test table, populate it and run an initial vacuum to force freezing
#------------------------------------------------------------------------------

subtest 'Test 1: Create test table, populate it and run an initial vacuum to force freezing' => sub
{
$node->safe_psql($dbname, q{
	CREATE TABLE vestat (x int)
		WITH (autovacuum_enabled = off, fillfactor = 10);
	INSERT INTO vestat SELECT x FROM generate_series(1, 1000) AS g(x);
	ANALYZE vestat;
	VACUUM (FREEZE, VERBOSE) vestat;
});

# Poll the stats view until vm_new_visible_frozen_pages exceeds the baseline
# (0) or we time out.  The rev_all_* counters are not expected to change here
# and are not polled in run_vacuum mode.
$updated = wait_for_vacuum_stats(
	tab_all_frozen_pages_count => 0,
	tab_all_visible_pages_count => 0,
	run_vacuum => 1,
);

ok($updated,
	'vacuum stats updated after vacuuming the table (vm_new_visible_frozen_pages advanced)')
	or diag "Timeout waiting for pg_stat_vacuum_tables to update after $timeout seconds during vacuum";

#------------------------------------------------------------------------------
# Snapshot current statistics for later comparison
#------------------------------------------------------------------------------

fetch_vacuum_stats();

#------------------------------------------------------------------------------
# Verify initial statistics after vacuum
#------------------------------------------------------------------------------
ok($vm_new_visible_frozen_pages > $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages has increased');
ok($rev_all_frozen_pages == $rev_all_frozen_pages_prev, 'table rev_all_frozen_pages stay the same');
ok($rev_all_visible_pages == $rev_all_visible_pages_prev, 'table rev_all_visible_pages stay the same');
} or print_vacuum_stats_on_error();

#------------------------------------------------------------------------------
# Test 2: Trigger backend updates
# Backend activity should reset per-page visibility/freeze marks and increment revision counters
#------------------------------------------------------------------------------
subtest 'Test 2: Trigger backend updates' => sub
{
save_vacuum_stats();

$node->safe_psql($dbname, q{
	UPDATE vestat SET x = x + 1001;
});

# Poll until the revision counters advance past 0, or timeout.
# vm_new_visible_frozen_pages is not expected to change here and is not
# polled when run_vacuum is off.
$updated = wait_for_vacuum_stats(
	tab_all_frozen_pages_count => 0,
	tab_all_visible_pages_count => 0,
	run_vacuum => 0,
);
ok($updated,
	'vacuum stats updated after backend tuple updates (rev_all_frozen_pages and rev_all_visible_pages advanced)')
	or diag "Timeout waiting for pg_stat_vacuum_* update after $timeout seconds";

#------------------------------------------------------------------------------
# Snapshot current statistics for later comparison
#------------------------------------------------------------------------------

fetch_vacuum_stats();

#------------------------------------------------------------------------------
# Check updated statistics after backend activity
#------------------------------------------------------------------------------

ok($vm_new_visible_frozen_pages == $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages stay the same');
ok($rev_all_frozen_pages > $rev_all_frozen_pages_prev, 'table rev_all_frozen_pages has increased');
ok($rev_all_visible_pages > $rev_all_visible_pages_prev, 'table rev_all_visible_pages has increased');
} or print_vacuum_stats_on_error();

#------------------------------------------------------------------------------
# Test 3: Force another vacuum after backend modifications - vacuum should restore freeze/visibility
#------------------------------------------------------------------------------
subtest 'Test 3: Force another vacuum after backend modifications - vacuum should restore freeze/visibility' => sub
{
save_vacuum_stats();

# NOTE(review): this VACUUM plus run_vacuum => 1 below means the table is
# vacuumed twice in this subtest; kept as-is since the assertions only
# require the counter to advance.
$node->safe_psql($dbname, q{ VACUUM vestat; });

# Poll until vm_new_visible_frozen_pages exceeds the current snapshot value;
# the rev_* counters are expected to stay unchanged and are not polled in
# run_vacuum mode.
$updated = wait_for_vacuum_stats(
	tab_all_frozen_pages_count => $vm_new_visible_frozen_pages,
	run_vacuum => 1,
);

ok($updated,
	'vacuum stats updated after vacuuming the all-updated table (vm_new_visible_frozen_pages advanced)')
	or diag "Timeout waiting for pg_stat_vacuum_tables to update after $timeout seconds during vacuum";

#------------------------------------------------------------------------------
# Snapshot current statistics for later comparison
#------------------------------------------------------------------------------

fetch_vacuum_stats();

#------------------------------------------------------------------------------
# Verify statistics after final vacuum
# Check updated stats after backend work
#------------------------------------------------------------------------------
ok($vm_new_visible_frozen_pages > $vm_new_visible_frozen_pages_prev, 'table vm_new_visible_frozen_pages has increased');
ok($rev_all_frozen_pages == $rev_all_frozen_pages_prev, 'table rev_all_frozen_pages stay the same');
ok($rev_all_visible_pages == $rev_all_visible_pages_prev, 'table rev_all_visible_pages stay the same');
} or print_vacuum_stats_on_error();
#------------------------------------------------------------------------------
# Cleanup
#------------------------------------------------------------------------------

$node->safe_psql('postgres', q{
	DROP DATABASE statistic_vacuum_database_regression;
});
statistic_vacuum_database_regression; +}); + +$node->stop; +done_testing(); diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 7d26bc1a1dc..d4dc44970d2 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2332,6 +2332,81 @@ pg_stat_user_tables| SELECT relid, rev_all_visible_pages FROM pg_stat_all_tables WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_stat_vacuum_database| SELECT d.oid AS dboid, + d.datname AS dbname, + s.db_blks_read, + s.db_blks_hit, + s.total_blks_dirtied, + s.total_blks_written, + s.wal_records, + s.wal_fpi, + s.wal_bytes, + s.blk_read_time, + s.blk_write_time, + s.delay_time, + s.total_time, + s.wraparound_failsafe, + s.errors + FROM pg_database d, + LATERAL pg_stat_get_vacuum_database(d.oid) s(dboid, db_blks_read, db_blks_hit, total_blks_dirtied, total_blks_written, wal_records, wal_fpi, wal_bytes, blk_read_time, blk_write_time, delay_time, total_time, wraparound_failsafe, errors); +pg_stat_vacuum_indexes| SELECT c.oid AS relid, + i.oid AS indexrelid, + n.nspname AS schemaname, + c.relname, + i.relname AS indexrelname, + s.total_blks_read, + s.total_blks_hit, + s.total_blks_dirtied, + s.total_blks_written, + s.rel_blks_read, + s.rel_blks_hit, + s.pages_deleted, + s.tuples_deleted, + s.wal_records, + s.wal_fpi, + s.wal_bytes, + s.blk_read_time, + s.blk_write_time, + s.delay_time, + s.total_time + FROM (((pg_class c + JOIN pg_index x ON ((c.oid = x.indrelid))) + JOIN pg_class i ON ((i.oid = x.indexrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))), + LATERAL pg_stat_get_vacuum_indexes(i.oid) s(relid, total_blks_read, total_blks_hit, total_blks_dirtied, total_blks_written, rel_blks_read, rel_blks_hit, pages_deleted, tuples_deleted, wal_records, wal_fpi, wal_bytes, blk_read_time, blk_write_time, delay_time, total_time) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 
't'::"char", 'm'::"char"])); +pg_stat_vacuum_tables| SELECT n.nspname AS schemaname, + c.relname, + s.relid, + s.total_blks_read, + s.total_blks_hit, + s.total_blks_dirtied, + s.total_blks_written, + s.rel_blks_read, + s.rel_blks_hit, + s.pages_scanned, + s.pages_removed, + s.vm_new_frozen_pages, + s.vm_new_visible_pages, + s.vm_new_visible_frozen_pages, + s.missed_dead_pages, + s.tuples_deleted, + s.tuples_frozen, + s.recently_dead_tuples, + s.missed_dead_tuples, + s.wraparound_failsafe, + s.index_vacuum_count, + s.wal_records, + s.wal_fpi, + s.wal_bytes, + s.blk_read_time, + s.blk_write_time, + s.delay_time, + s.total_time + FROM (pg_class c + JOIN pg_namespace n ON ((n.oid = c.relnamespace))), + LATERAL pg_stat_get_vacuum_tables(c.oid) s(relid, total_blks_read, total_blks_hit, total_blks_dirtied, total_blks_written, rel_blks_read, rel_blks_hit, pages_scanned, pages_removed, vm_new_frozen_pages, vm_new_visible_pages, vm_new_visible_frozen_pages, missed_dead_pages, tuples_deleted, tuples_frozen, recently_dead_tuples, missed_dead_tuples, wraparound_failsafe, index_vacuum_count, wal_records, wal_fpi, wal_bytes, blk_read_time, blk_write_time, delay_time, total_time) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); pg_stat_wal| SELECT wal_records, wal_fpi, wal_bytes, -- 2.39.5 (Apple Git-154)