From 3f01281af3ba81b35777cb7d717f76e001fd3e10 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan
Date: Sat, 19 Feb 2022 14:07:35 -0800
Subject: [PATCH] Add adversarial ConditionalLockBufferForCleanup() gizmo to
 vacuumlazy.c.

---
 src/backend/access/heap/vacuumlazy.c | 36 +++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 242511a23..31c6b360e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -50,6 +50,7 @@
 #include "commands/dbcommands.h"
 #include "commands/progress.h"
 #include "commands/vacuum.h"
+#include "common/pg_prng.h"
 #include "executor/instrument.h"
 #include "miscadmin.h"
 #include "optimizer/paths.h"
@@ -748,6 +749,39 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	}
 }
 
+/*
+ * Adversarial gizmo, simulates excessive failure to get cleanup locks
+ */
+static inline bool
+lazy_conditionallockbufferforcleanup(Buffer buffer)
+{
+	/*
+	 * Artificially fail to get a cleanup lock 50% of the time.
+	 *
+	 * XXX: What about temp tables?  We simulate not getting a cleanup lock
+	 * there, but is that choice actually reasonable?
+	 */
+	if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 2))
+		return false;
+
+#if 0
+	/*
+	 * 50% is very very aggressive, while 2% - 5% is still basically
+	 * adversarial but in many ways less annoying.
+	 *
+	 * This version (which injects a failure to get a cleanup lock 2% of the
+	 * time) seems to pass the regression tests, even with my parallel make
+	 * check-world recipe.  Expected query plans don't seem to shift on
+	 * account of unexpected index bloat (nor are there any problems of a
+	 * similar nature) with this variant of the gizmo.
+	 */
+	if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 50))
+		return false;
+#endif
+
+	return ConditionalLockBufferForCleanup(buffer);
+}
+
 /*
  *	lazy_scan_heap() -- workhorse function for VACUUM
  *
@@ -1093,7 +1127,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
 	 * a cleanup lock right away, we may be able to settle for reduced
 	 * processing using lazy_scan_noprune.
 	 */
-	if (!ConditionalLockBufferForCleanup(buf))
+	if (!lazy_conditionallockbufferforcleanup(buf))
 	{
 		bool		hastup,
 					recordfreespace;
-- 
2.30.2
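
For reference, the probability trick the gizmo relies on is simply a uniform
32-bit draw compared against PG_UINT32_MAX / N, which fires roughly 1/N of the
time: dividing by 2 gives the ~50% failure rate, dividing by 50 gives the ~2%
variant kept under #if 0.  Below is a minimal standalone sketch of that idea,
not part of the patch itself; the xorshift32() and
simulate_cleanup_lock_failure() names are invented for illustration, and the
toy PRNG merely stands in for pg_prng_uint32(&pg_global_prng_state) so the
sketch compiles without backend headers.

/*
 * Standalone sketch: treat any uniform 32-bit value at or below
 * UINT32_MAX / denominator as a simulated failure, so failures occur
 * roughly 1/denominator of the time.  Toy code, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t prng_state = 0x9e3779b9;

/* Toy xorshift32 PRNG standing in for pg_prng_uint32(&pg_global_prng_state) */
static uint32_t
xorshift32(void)
{
	uint32_t	x = prng_state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	prng_state = x;
	return x;
}

/* Returns true ~1/denominator of the time (2 => ~50%, 50 => ~2%) */
static int
simulate_cleanup_lock_failure(unsigned int denominator)
{
	return xorshift32() <= (UINT32_MAX / denominator);
}

int
main(void)
{
	unsigned int denominators[] = {2, 50};

	for (int d = 0; d < 2; d++)
	{
		int			failures = 0;

		for (int i = 0; i < 1000000; i++)
		{
			if (simulate_cleanup_lock_failure(denominators[d]))
				failures++;
		}
		printf("1/%u threshold: %.2f%% simulated failures\n",
			   denominators[d], failures / 10000.0);
	}
	return 0;
}

Running the sketch prints percentages close to 50.00 and 2.00, matching the
two thresholds the patch experiments with.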