Author: Noah Misch
Commit: Noah Misch

    Use HASH_BLOBS for xidhash.

    This caused BufFile errors on buildfarm member sungazer, and SIGSEGV
    was possible.  Conditions for reaching those symptoms were more
    frequent on big-endian systems.

    Reviewed by FIXME.

    Discussion: https://postgr.es/m/20201129214441.GA691200@rfd.leadboat.com

diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index c37aafe..fce1dee 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -804,7 +804,7 @@ apply_handle_stream_start(StringInfo s)
 		hash_ctl.entrysize = sizeof(StreamXidHash);
 		hash_ctl.hcxt = ApplyContext;
 		xidhash = hash_create("StreamXidHash", 1024, &hash_ctl,
-							  HASH_ELEM | HASH_CONTEXT);
+							  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 	}
 
 	/* open the spool file for this transaction */
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index 1488bff..40610f1 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -1626,6 +1626,42 @@ sub interactive_psql
 	return $harness;
 }
 
+# Return an IPC::Run harness object for a non-interactive psql session.
+# FIXME pick a better name, and add POD docs
+sub psql_printable
+{
+	my ($self, $dbname, $stdin, $stdout, $timer, %params) = @_;
+
+	my $replication = $params{replication};
+
+	my @psql_params = (
+		'psql',
+		'-XAtq',
+		'-d',
+		$self->connstr($dbname)
+		  . (defined $replication ? " replication=$replication" : ""),
+		'-f',
+		'-');
+
+	$params{on_error_stop} = 1 unless defined $params{on_error_stop};
+
+	push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+	push @psql_params, @{ $params{extra_params} }
+	  if defined $params{extra_params};
+
+	# Ensure there is no data waiting to be sent:
+	$$stdin = "" if ref($stdin);
+	# IPC::Run would otherwise append to existing contents:
+	$$stdout = "" if ref($stdout);
+
+	my $harness = IPC::Run::start \@psql_params,
+	  '<', $stdin, '>', $stdout, $timer;
+
+	die "psql startup timed out" if $timer->is_expired;
+
+	return $harness;
+}
+
 =pod
 
 =item $node->poll_query_until($dbname, $query [, $expected ])
diff --git a/src/test/subscription/t/015_stream.pl b/src/test/subscription/t/015_stream.pl
index fffe001..9ebe166 100644
--- a/src/test/subscription/t/015_stream.pl
+++ b/src/test/subscription/t/015_stream.pl
@@ -47,14 +47,35 @@ my $result =
 is($result, qq(2|2|2), 'check initial data was copied to subscriber');
 
-# Insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+# Interleave a pair of transactions, each exceeding the 64kB limit.
+my $in  = '';
+my $out = '';
+
+my $timer = IPC::Run::timer(180);
+
+my $h = $node_publisher->psql_printable('postgres', \$in, \$out, $timer);
+
+$in .= q{
 BEGIN;
 INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
 UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
 DELETE FROM test_tab WHERE mod(a,3) = 0;
+};
+$h->pump;
+
+$node_publisher->safe_psql('postgres', q{
+BEGIN;
+INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(5001, 9999) s(i);
+DELETE FROM test_tab WHERE a > 5000;
 COMMIT;
 });
 
+$in .= q{
+COMMIT;
+\q
+};
+$h->pump;
+$h->finish;
+
 $node_publisher->wait_for_catchup($appname);
 
 $result =
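
Why HASH_BLOBS matters here: with only HASH_ELEM, dynahash defaults to
string_hash(), which hashes the key as a NUL-terminated string and matches
entries with a strncmp()-style comparison.  A TransactionId is a fixed-size
binary key, and on a big-endian machine every XID below 2^24 begins with a
zero byte, so XIDs such as 1 and 2 both look like the empty string: they hash
identically and even compare equal, so xidhash lookups can return the wrong
entry.  The toy program below is illustrative only, not PostgreSQL code (the
real functions are string_hash()/strncmp() versus tag_hash()/memcmp(); a
djb2-style hash stands in for both here):

#include <stdio.h>
#include <string.h>

static unsigned int
string_style_hash(const void *key)
{
	const unsigned char *k = key;
	unsigned int h = 5381;

	/* treats the key as NUL-terminated: stops at the first zero byte */
	while (*k)
		h = h * 33 + *k++;
	return h;
}

static unsigned int
blob_style_hash(const void *key, size_t keysize)
{
	const unsigned char *k = key;
	unsigned int h = 5381;

	/* hashes all keysize bytes, zero bytes included */
	while (keysize-- > 0)
		h = h * 33 + *k++;
	return h;
}

int
main(void)
{
	/* big-endian images of XIDs 1 and 2: the leading byte is zero */
	unsigned char xid1[4] = {0, 0, 0, 1};
	unsigned char xid2[4] = {0, 0, 0, 2};

	/* both hash as the empty string, and strcmp() calls them equal */
	printf("string-style: %u vs %u, match: %s\n",
		   string_style_hash(xid1), string_style_hash(xid2),
		   strcmp((const char *) xid1, (const char *) xid2) == 0
		   ? "EQUAL" : "different");

	/* hashing all four bytes keeps distinct XIDs distinct */
	printf("blob-style:   %u vs %u\n",
		   blob_style_hash(xid1, sizeof(xid1)),
		   blob_style_hash(xid2, sizeof(xid2)));
	return 0;
}

On little-endian hardware the least significant byte comes first, so small
XIDs rarely start with a zero byte; that matches the commit message's
observation that the symptoms were more frequent on big-endian systems.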