From d9d363c7c298f44063a2aa33530622548ee45cbf Mon Sep 17 00:00:00 2001
From: Rintaro Ikeda
Date: Sun, 8 Jun 2025 23:40:32 +0900
Subject: [PATCH 2/2] 1. Do not retry transactions that failed due to
 other_sql_failures. 2. Update documentation and comments. 3. Add a test.

---
 doc/src/sgml/ref/pgbench.sgml                |  3 +++
 src/bin/pgbench/pgbench.c                    | 26 ++++++++++++++++++++------
 src/bin/pgbench/t/001_pgbench_with_server.pl | 22 ++++++++++++++++++++++
 3 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml
index dcb8c1c487c..2086dd59cb3 100644
--- a/doc/src/sgml/ref/pgbench.sgml
+++ b/doc/src/sgml/ref/pgbench.sgml
@@ -923,6 +923,9 @@ pgbench <replaceable>options</replaceable> <replaceable>d
         serialization failure. This allows all clients specified with -c
         option to continuously apply load to the server, even if some
         transactions fail.
+
+        Note that this option cannot be used together with
+        <option>--exit-on-abort</option>.
        </para>
       </listitem>
      </varlistentry>
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 5db222f2c1e..2333110c29f 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -287,6 +287,9 @@ static int	main_pid;		/* main process id used in log filename */
 /*
  * We cannot retry a transaction after the serialization/deadlock error if its
  * number of tries reaches this maximum; if its value is zero, it is not used.
+ * We can ignore errors, including serialization/deadlock errors and other SQL
+ * errors, if --continue-on-error is set, but in this case the failed
+ * transaction is not retried.
  */
 static uint32 max_tries = 1;
 
@@ -402,7 +405,8 @@ typedef struct StatsData
	 *   directly successful transactions (they were successfully completed on
	 *   the first try).
	 *
-	 * A failed transaction is defined as unsuccessfully retried transactions.
+	 * A failed transaction is defined as unsuccessfully retried transactions
+	 * unless the continue-on-error option is specified.
	 * It can be one of two types:
	 *
	 * failed (the number of failed transactions) =
@@ -411,6 +415,11 @@ typedef struct StatsData
	 *   'deadlock_failures' (they got a deadlock error and were not
	 *   successfully retried).
	 *
+	 * When the continue-on-error option is specified,
+	 * failed (the number of failed transactions) =
+	 *   'other_sql_failures' (they got an error when the continue-on-error
+	 *   option was specified).
+	 *
	 * If the transaction was retried after a serialization or a deadlock
	 * error this does not guarantee that this retry was successful. Thus
	 *
@@ -960,7 +969,7 @@ usage(void)
		   "                           (default: \"pgbench_log\")\n"
		   "  --max-tries=NUM          max number of tries to run transaction (default: 1)\n"
		   "  --continue-on-error\n"
-		   "                           Continue and retry transactions that failed due to errors other than serialization or deadlocks.\n"
+		   "                           continue to process transactions after a transaction fails due to errors other than serialization or deadlocks.\n"
		   "  --progress-timestamp     use Unix epoch timestamps for progress\n"
		   "  --random-seed=SEED       set random seed (\"time\", \"rand\", integer)\n"
		   "  --sampling-rate=NUM      fraction of transactions to log (e.g., 0.01 for 1%%)\n"
@@ -3258,8 +3267,7 @@ static bool
 canRetryError(EStatus estatus)
 {
 	return (estatus == ESTATUS_SERIALIZATION_ERROR ||
-			estatus == ESTATUS_DEADLOCK_ERROR ||
-			(continue_on_error && estatus == ESTATUS_OTHER_SQL_ERROR));
+			estatus == ESTATUS_DEADLOCK_ERROR);
 }
 
 /*
@@ -4019,7 +4027,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
					if (PQpipelineStatus(st->con) != PQ_PIPELINE_ON)
						st->state = CSTATE_END_COMMAND;
				}
-				else if (canRetryError(st->estatus))
+				else if (canRetryError(st->estatus) || continue_on_error)
					st->state = CSTATE_ERROR;
				else
					st->state = CSTATE_ABORTED;
@@ -4111,6 +4119,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
				 * can retry the error.
				 */
				st->state = timer_exceeded ? CSTATE_FINISHED :
+					continue_on_error ? CSTATE_FAILURE :
					doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
			}
			else
@@ -6446,7 +6455,8 @@ printResults(StatsData *total,
 
	/*
	 * Remaining stats are nonsensical if we failed to execute any xacts due
-	 * to others than serialization or deadlock errors
+	 * to errors other than serialization or deadlock errors and
+	 * --continue-on-error is not set.
	 */
	if (total_cnt <= 0)
		return;
@@ -7086,6 +7096,7 @@ main(int argc, char **argv)
				pg_logging_increase_verbosity();
				break;
			case 18:			/* continue-on-error */
+				benchmarking_option_set = true;
				continue_on_error = true;
				break;
			default:
@@ -7242,6 +7253,9 @@ main(int argc, char **argv)
			pg_fatal("an unlimited number of transaction tries can only be used with --latency-limit or a duration (-T)");
	}
 
+	if (exit_on_abort && continue_on_error)
+		pg_fatal("--exit-on-abort and --continue-on-error are mutually exclusive options");
+
	/*
	 * save main process id in the global variable because process id will be
	 * changed after fork.
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 7dd78940300..afb49b554d0 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -1813,6 +1813,28 @@ update counter set i = i+1 returning i \gset
 # Clean up
 $node->safe_psql('postgres', 'DROP TABLE counter;');
 
+# Test --continue-on-error
+$node->safe_psql('postgres',
+	'CREATE TABLE unique_table(i int unique); ' . 'INSERT INTO unique_table VALUES (0);');
+
+$node->pgbench(
+	'-t 10 --continue-on-error --failures-detailed',
+	0,
+	[
+		qr{processed: 0/10\b},
+		qr{other failures: 10\b}
+	],
+	[],
+	'test --continue-on-error',
+	{
+		'002_continue_on_error' => q{
+  insert into unique_table values (0);
+}
+	});
+
+# Clean up
+$node->safe_psql('postgres', 'DROP TABLE unique_table;');
+
 # done
 $node->safe_psql('postgres', 'DROP TABLESPACE regress_pgbench_tap_1_ts');
 $node->stop;
-- 
2.39.5 (Apple Git-154)
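
For reviewers who want to exercise the new behaviour by hand, outside the TAP
suite, the following is a sketch only: the script name conflict.sql, the target
database, and the exact report wording are illustrative and not part of the
patch.

    $ psql -c 'CREATE TABLE unique_table(i int unique); INSERT INTO unique_table VALUES (0);'
    $ cat conflict.sql
    insert into unique_table values (0);
    $ pgbench -n -t 10 --continue-on-error --failures-detailed -f conflict.sql

Every transaction hits the unique-key violation, so with --continue-on-error
the run should complete without aborting the client and count the errors as
other failures, which is what the added test asserts with
qr{other failures: 10\b}.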