From 49af187f2366f21274a1e7efcc4d23dde42ec654 Mon Sep 17 00:00:00 2001
From: Alvaro Herrera
Date: Wed, 6 Sep 2017 15:17:09 +0200
Subject: [PATCH 2/2] Don't freeze recently dead HOT tuples

---
 src/backend/access/heap/heapam.c  | 11 +++++++----
 src/backend/commands/vacuumlazy.c | 34 +++++++++++++++++++++++++---------
 2 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index e61dd771d0..aa0b270db8 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6470,7 +6470,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 			 * aborted or crashed then it's okay to ignore it, otherwise not.
 			 * Note that an updater older than cutoff_xid cannot possibly be
 			 * committed, because HeapTupleSatisfiesVacuum would have returned
-			 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
+			 * HEAPTUPLE_DEAD or HEAPTUPLE_RECENTLY_DEAD and we would not be
+			 * trying to freeze the tuple.
 			 *
 			 * As with all tuple visibility routines, it's critical to test
 			 * TransactionIdIsInProgress before TransactionIdDidCommit,
@@ -6500,8 +6501,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 			 */
 
 			/*
-			 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
-			 * update Xid cannot possibly be older than the xid cutoff.
+			 * Since the tuple wasn't marked HEAPTUPLE_DEAD or
+			 * HEAPTUPLE_RECENTLY_DEAD by vacuum, the update Xid cannot
+			 * possibly be older than the xid cutoff.
 			 */
 			Assert(!TransactionIdIsValid(update_xid) ||
 				   !TransactionIdPrecedes(update_xid, cutoff_xid));
@@ -6584,7 +6586,8 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  *
  * It is assumed that the caller has checked the tuple with
  * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
- * (else we should be removing the tuple, not freezing it).
+ * nor HEAPTUPLE_RECENTLY_DEAD, in which cases the tuple *must not* be
+ * frozen.
  *
  * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
  * XID older than it could neither be running nor seen as running by any
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 45b1859475..c26cb21c3a 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -593,6 +593,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 		OffsetNumber offnum,
 					maxoff;
 		bool		tupgone,
+					deadkept,
 					hastup;
 		int			prev_dead_count;
 		int			nfrozen;
@@ -974,6 +975,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 			tuple.t_tableOid = RelationGetRelid(onerel);
 
 			tupgone = false;
+			deadkept = false;
 
 			switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
 			{
@@ -992,11 +994,13 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 					 * it were RECENTLY_DEAD.  Also, if it's a heap-only
 					 * tuple, we choose to keep it, because it'll be a lot
 					 * cheaper to get rid of it in the next pruning pass than
-					 * to treat it like an indexed tuple.
+					 * to treat it like an indexed tuple; but make sure we do
+					 * not freeze it either, because that would make it
+					 * visible again.
 					 */
 					if (HeapTupleIsHotUpdated(&tuple) ||
 						HeapTupleIsHeapOnly(&tuple))
-						nkeep += 1;
+						deadkept = true;
 					else
 						tupgone = true;	/* we can delete the tuple */
 					all_visible = false;
@@ -1049,7 +1053,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 					 * If tuple is recently deleted then we must not remove it
 					 * from relation.
 					 */
-					nkeep += 1;
+					deadkept = true;
 					all_visible = false;
 					break;
 				case HEAPTUPLE_INSERT_IN_PROGRESS:
@@ -1065,6 +1069,9 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 					break;
 			}
 
+			if (deadkept)
+				nkeep += 1;
+
 			if (tupgone)
 			{
 				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
@@ -1075,7 +1082,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 			}
 			else
 			{
-				bool		tuple_totally_frozen;
+				bool		tuple_totally_frozen = false;
 
 				num_tuples += 1;
 				hastup = true;
@@ -1084,12 +1091,21 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 				 * Each non-removable tuple must be checked to see if it needs
 				 * freezing.  Note we already have exclusive buffer lock.
 				 */
-				if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-											  MultiXactCutoff, &frozen[nfrozen],
-											  &tuple_totally_frozen))
-					frozen[nfrozen++].offset = offnum;
+				if (!deadkept)
+				{
+					if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
+												  MultiXactCutoff,
+												  &frozen[nfrozen],
+												  &tuple_totally_frozen))
+						frozen[nfrozen++].offset = offnum;
+				}
 
-				if (!tuple_totally_frozen)
+				/*
+				 * If we kept this dead tuple, or the tuple could not be
+				 * completely frozen, then we must not mark the page as
+				 * all-frozen.
+				 */
+				if (deadkept || !tuple_totally_frozen)
 					all_frozen = false;
 			}
 		}						/* scan along page */
-- 
2.11.0