From c2bb94cf08b2bc472fd5d73270a4c09704f78d57 Mon Sep 17 00:00:00 2001 From: Thomas Munro Date: Mon, 14 Aug 2023 17:06:42 +1200 Subject: [PATCH v5] Don't trust unvalidated xl_tot_len. xl_tot_len comes first in a WAL record. Usually we don't trust it to be the true length until we've validated the record header. If the record header was split across two pages, previously we wouldn't do the validation until after we'd already tried to allocate enough memory to hold the record, which was bad because it might actually be garbage bytes from a recycled WAL file, so we could try to allocate a lot of memory. Release 15 made it worse. Since 70b4f82a4b5, we'd at least generate an end-of-WAL condition if the garbage 4 byte value happened to be > 1GB, but we'd still try to allocate up to 1GB of memory bogusly otherwise. That was an improvement, but unfortunately release 15 tries to allocate another object before that, so you could get a FATAL error and recovery could fail. We can fix both variants of the problem more fundamentally using pre-existing page-level validation, if we just re-order some logic. The new order of operations in the split-header case defers all memory allocation based on xl_tot_len until we've read the following page. At that point we know that its first few bytes are not recycled data, by checking its xlp_pageaddr, and that its xlp_rem_len agrees with xl_tot_len on the preceding page. That is strong evidence that xl_tot_len was truly the start of a record that was logged. This problem was most likely to occur on a standby, because walreceiver.c recycles WAL files without zeroing out trailing regions of each page. We should fix that too, but that wouldn't protect us from rare crash scenarios where the trailing zeroes don't make it to disk. Back-patch to 15. We might decide to back-patch a variant of this to older branches with more study, but the symptoms there (bogus large allocation) are less severe. Author: Thomas Munro Author: Michael Paquier Reported-by: Alexander Lakhin Reviewed-by: Noah Misch (the idea, not the code) Reviewed-by: Michael Paquier Reviewed-by: Sergei Kornilov Discussion: https://postgr.es/m/17928-aa92416a70ff44a2%40postgresql.org --- src/backend/access/transam/xlog.c | 18 +- src/backend/access/transam/xlogreader.c | 77 ++-- src/test/perl/TestLib.pm | 36 ++ src/test/recovery/t/038_end_of_wal.pl | 449 ++++++++++++++++++++++++ 4 files changed, 539 insertions(+), 41 deletions(-) create mode 100644 src/test/recovery/t/038_end_of_wal.pl diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index dbd72c0a16..98e4cbc27d 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -12041,7 +12041,7 @@ retry: /* * Check the page header immediately, so that we can retry immediately if - * it's not valid. This may seem unnecessary, because XLogReadRecord() + * it's not valid. This may seem unnecessary, because ReadPageInternal() * validates the page header anyway, and would propagate the failure up to * ReadRecord(), which would retry. However, there's a corner case with * continuation records, if a record is split across two pages such that @@ -12064,9 +12064,23 @@ retry: * * Validating the page header is cheap enough that doing it twice * shouldn't be a big deal from a performance point of view. + * + * When not in standby mode, an invalid page header should cause recovery + * to end, not retry reading the page, so we don't need to validate the + * page header here for the retry. 
Instead, ReadPageInternal() is + * responsible for the validation. */ - if (!XLogReaderValidatePageHeader(xlogreader, targetPagePtr, readBuf)) + if (StandbyMode && + !XLogReaderValidatePageHeader(xlogreader, targetPagePtr, readBuf)) { + /* + * Emit this error right now then retry this page immediately. Use + * errmsg_internal() because the message was already translated. + */ + if (xlogreader->errormsg_buf[0]) + ereport(emode_for_corrupt_record(emode, EndRecPtr), + (errmsg_internal("%s", xlogreader->errormsg_buf))); + /* reset any error XLogReaderValidatePageHeader() might have set */ xlogreader->errormsg_buf[0] = '\0'; goto next_record_is_invalid; diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index 26f8e0ae2c..0016a3b3ef 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -155,6 +155,9 @@ XLogReaderFree(XLogReaderState *state) * XLOG_BLCKSZ, and make sure it's at least 5*Max(BLCKSZ, XLOG_BLCKSZ) to start * with. (That is enough for all "normal" records, but very large commit or * abort records might need more space.) + * + * Note: This routine should *never* be called for xl_tot_len until the header + * of the record has been fully validated. */ static bool allocate_recordbuf(XLogReaderState *state, uint32 reclength) @@ -164,25 +167,6 @@ allocate_recordbuf(XLogReaderState *state, uint32 reclength) newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ); newSize = Max(newSize, 5 * Max(BLCKSZ, XLOG_BLCKSZ)); -#ifndef FRONTEND - - /* - * Note that in much unlucky circumstances, the random data read from a - * recycled segment can cause this routine to be called with a size - * causing a hard failure at allocation. For a standby, this would cause - * the instance to stop suddenly with a hard failure, preventing it to - * retry fetching WAL from one of its sources which could allow it to move - * on with replay without a manual restart. If the data comes from a past - * recycled segment and is still valid, then the allocation may succeed - * but record checks are going to fail so this would be short-lived. If - * the allocation fails because of a memory shortage, then this is not a - * hard failure either per the guarantee given by MCXT_ALLOC_NO_OOM. - */ - if (!AllocSizeIsValid(newSize)) - return false; - -#endif - if (state->readRecordBuf) pfree(state->readRecordBuf); state->readRecordBuf = @@ -346,15 +330,7 @@ restart: } else { - /* XXX: more validation should be done here */ - if (total_len < SizeOfXLogRecord) - { - report_invalid_record(state, - "invalid record length at %X/%X: wanted %u, got %u", - (uint32) (RecPtr >> 32), (uint32) RecPtr, - (uint32) SizeOfXLogRecord, total_len); - goto err; - } + /* We'll validate the header once we have the next page. */ gotheader = false; } @@ -370,17 +346,11 @@ restart: assembled = true; /* - * Enlarge readRecordBuf as needed. + * We always have space for a couple of pages, enough to validate a + * boundary-spanning record header. */ - if (total_len > state->readRecordBufSize && - !allocate_recordbuf(state, total_len)) - { - /* We treat this as a "bogus data" condition */ - report_invalid_record(state, "record length %u at %X/%X too long", - total_len, - (uint32) (RecPtr >> 32), (uint32) RecPtr); - goto err; - } + Assert(state->readRecordBufSize >= XLOG_BLCKSZ * 2); + Assert(state->readRecordBufSize >= len); /* Copy the first fragment of the record from the first page. 
 */
 	memcpy(state->readRecordBuf,
@@ -475,8 +445,37 @@ restart:
 				goto err;
 			gotheader = true;
 		}
-	} while (gotlen < total_len);
+		/*
+		 * We might need a bigger buffer.  We have validated the record
+		 * header, in the case that it split over a page boundary.  We've
+		 * also cross-checked total_len against xlp_rem_len on the second
+		 * page, and verified xlp_pageaddr on both.
+		 */
+		Assert(gotheader);
+		if (total_len > state->readRecordBufSize)
+		{
+			char		save_copy[XLOG_BLCKSZ * 2];
+
+			/*
+			 * Save and restore the data we already had.  It can't be more
+			 * than two pages.
+			 */
+			Assert(gotlen <= lengthof(save_copy));
+			Assert(gotlen <= state->readRecordBufSize);
+			memcpy(save_copy, state->readRecordBuf, gotlen);
+			if (!allocate_recordbuf(state, total_len))
+			{
+				/* We treat this as a "bogus data" condition */
+				report_invalid_record(state, "record length %u at %X/%X too long",
+									  total_len,
+									  (uint32) (RecPtr >> 32), (uint32) RecPtr);
+				goto err;
+			}
+			memcpy(state->readRecordBuf, save_copy, gotlen);
+			buffer = state->readRecordBuf + gotlen;
+		}
+	} while (gotlen < total_len);
 
 	Assert(gotheader);
 
 	record = (XLogRecord *) state->readRecordBuf;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index de851e1615..5f80ff9cbc 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -33,6 +33,7 @@ our @EXPORT = qw(
   check_mode_recursive
   chmod_recursive
   check_pg_config
+  scan_server_header
   system_or_bail
   system_log
   run_log
@@ -459,6 +460,41 @@ sub chmod_recursive
 	return;
 }
 
+# Returns an array that stores all the matches of the given regular expression
+# within the PostgreSQL installation's C<$header_path>.  This can be used to
+# retrieve specific value patterns from the installation's header files.
+sub scan_server_header
+{
+	my ($header_path, $regexp) = @_;
+
+	my ($stdout, $stderr);
+	my $result = IPC::Run::run [ 'pg_config', '--includedir-server' ], '>',
+	  \$stdout, '2>', \$stderr
+	  or die "could not execute pg_config";
+	chomp($stdout);
+	$stdout =~ s/\r$//;
+
+	open my $header_h, '<', "$stdout/$header_path" or die "$!";
+
+	my @match;
+	while (<$header_h>)
+	{
+		my $line = $_;
+
+		if ($line =~ /^$regexp/)
+		{
+			# Found match, so store all the results.
+			@match = @{^CAPTURE};
+			last;
+		}
+	}
+
+	close $header_h;
+	die "could not find match in header $header_path\n"
+	  unless @match;
+	return @match;
+}
+
 # Check presence of a given regexp within pg_config.h for the installation
 # where tests are running, returning a match status result depending on
 # that.
diff --git a/src/test/recovery/t/038_end_of_wal.pl b/src/test/recovery/t/038_end_of_wal.pl
new file mode 100644
index 0000000000..6927d1298a
--- /dev/null
+++ b/src/test/recovery/t/038_end_of_wal.pl
@@ -0,0 +1,449 @@
+# Copyright (c) 2023, PostgreSQL Global Development Group
+#
+# Test detecting the end of WAL replay.  This test suite generates fake
+# WAL records that can trigger various failure scenarios at replay.
+
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+use Fcntl qw(SEEK_SET);
+
+use integer;    # causes / operator to use integer math
+
+# Size of a WAL record header (the C structure XLogRecord, including padding).
+my $RECORD_HEADER_SIZE = 24;
+
+# Fields retrieved from code headers.
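+# (Scanning the installed xlog_internal.h keeps these constants in sync with
+# the server binaries under test, instead of relying on hard-coded copies.)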
+my @scan_result = scan_server_header('access/xlog_internal.h',
+	'#define\s+XLOG_PAGE_MAGIC\s+(\w+)');
+my $XLP_PAGE_MAGIC = hex($scan_result[0]);
+@scan_result = scan_server_header('access/xlog_internal.h',
+	'#define\s+XLP_FIRST_IS_CONTRECORD\s+(\w+)');
+my $XLP_FIRST_IS_CONTRECORD = hex($scan_result[0]);
+
+# Values queried from the server.
+my $WAL_SEGMENT_SIZE;
+my $WAL_BLOCK_SIZE;
+my $TLI;
+
+# Build path of a WAL segment.
+sub wal_segment_path
+{
+	my $node = shift;
+	my $tli = shift;
+	my $segment = shift;
+	my $wal_path =
+	  sprintf("%s/pg_wal/%08X%08X%08X", $node->data_dir, $tli, 0, $segment);
+	return $wal_path;
+}
+
+# Calculate, from an LSN (in bytes), its segment number and its offset.
+sub lsn_to_segment_and_offset
+{
+	my $lsn = shift;
+	return ($lsn / $WAL_SEGMENT_SIZE, $lsn % $WAL_SEGMENT_SIZE);
+}
+
+# Write some arbitrary data into WAL at the given LSN.
+# This should be called while the cluster is offline.
+sub write_wal
+{
+	my $node = shift;
+	my $tli = shift;
+	my $lsn = shift;
+	my $data = shift;
+
+	my ($segment, $offset) = lsn_to_segment_and_offset($lsn);
+	my $path = wal_segment_path($node, $tli, $segment);
+
+	open my $fh, "+<:raw", $path or die;
+	seek($fh, $offset, SEEK_SET) or die;
+	print $fh $data;
+	close $fh;
+}
+
+sub format_lsn
+{
+	my $lsn = shift;
+	return sprintf("%X/%X", $lsn >> 32, $lsn & 0xffffffff);
+}
+
+# Emit a WAL record of arbitrary size.  Returns the end LSN of the
+# record inserted, in bytes.
+sub emit_message
+{
+	my $node = shift;
+	my $size = shift;
+	return int(
+		$node->safe_psql(
+			'postgres',
+			"SELECT pg_logical_emit_message(true, '', repeat('a', $size)) - '0/0'"
+		));
+}
+
+# Get the current insert LSN of a node, in bytes.
+sub get_insert_lsn
+{
+	my $node = shift;
+	return int(
+		$node->safe_psql(
+			'postgres', "SELECT pg_current_wal_insert_lsn() - '0/0'"));
+}
+
+# Get a GUC value, converted to an int.
+sub get_int_setting
+{
+	my $node = shift;
+	my $name = shift;
+	return int(
+		$node->safe_psql(
+			'postgres',
+			"SELECT setting FROM pg_settings WHERE name = '$name'"));
+}
+
+sub start_of_page
+{
+	my $lsn = shift;
+	return $lsn & ~($WAL_BLOCK_SIZE - 1);
+}
+
+sub start_of_next_page
+{
+	my $lsn = shift;
+	return start_of_page($lsn) + $WAL_BLOCK_SIZE;
+}
+
+# Build a fake WAL record header based on the data given by the caller.
+# This needs to follow the format of the C structure XLogRecord.  To
+# be inserted with write_wal().
+sub build_record_header
+{
+	my $xl_tot_len = shift;
+	my $xl_xid = shift || 0;
+	my $xl_prev = shift || 0;
+	my $xl_info = shift || 0;
+	my $xl_rmid = shift || 0;
+	my $xl_crc = shift || 0;
+
+	# This needs to follow the structure XLogRecord:
+	# I for xl_tot_len
+	# I for xl_xid
+	# Q for xl_prev
+	# C for xl_info
+	# C for xl_rmid
+	# BB for two bytes of padding
+	# I for xl_crc
+	return pack("IIQCCBBI",
+		$xl_tot_len, $xl_xid, $xl_prev, $xl_info, $xl_rmid, 0, 0, $xl_crc);
+}
+
+# Build a fake WAL page header, based on the data given by the caller.
+# This needs to follow the format of the C structure XLogPageHeaderData.
+# To be inserted with write_wal().
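+# For example, a page on timeline 1 at address 0x2000000 holding no
+# continuation data could be faked as:
+#   build_page_header($XLP_PAGE_MAGIC, 0, 1, 0x2000000)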
+sub build_page_header
+{
+	my $xlp_magic = shift;
+	my $xlp_info = shift || 0;
+	my $xlp_tli = shift || 0;
+	my $xlp_pageaddr = shift || 0;
+	my $xlp_rem_len = shift || 0;
+
+	# This needs to follow the structure XLogPageHeaderData:
+	# S for xlp_magic
+	# S for xlp_info
+	# I for xlp_tli
+	# Q for xlp_pageaddr
+	# I for xlp_rem_len
+	return pack("SSIQI",
+		$xlp_magic, $xlp_info, $xlp_tli, $xlp_pageaddr, $xlp_rem_len);
+}
+
+# Make sure we are far enough away from the end of a page that a couple of
+# small records would still fit.  This inserts records of a fixed size until
+# the insert position is at least $page_threshold bytes before the end of
+# the current WAL page.
+sub advance_out_of_record_splitting_zone
+{
+	my $node = shift;
+
+	my $page_threshold = 2000;
+	my $end_lsn = get_insert_lsn($node);
+	my $page_offset = $end_lsn % $WAL_BLOCK_SIZE;
+	while ($page_offset >= $WAL_BLOCK_SIZE - $page_threshold)
+	{
+		$end_lsn = emit_message($node, $page_threshold);
+		$page_offset = $end_lsn % $WAL_BLOCK_SIZE;
+	}
+	return $end_lsn;
+}
+
+# Advance so close to the end of a page that a record header (the C
+# structure XLogRecord) would not fit on it.
+sub advance_to_record_splitting_zone
+{
+	my $node = shift;
+
+	my $end_lsn = get_insert_lsn($node);
+	my $page_offset = $end_lsn % $WAL_BLOCK_SIZE;
+
+	# Get fairly close to the end of a page in big steps
+	while ($page_offset <= $WAL_BLOCK_SIZE - 512)
+	{
+		$end_lsn = emit_message($node, $WAL_BLOCK_SIZE - $page_offset - 256);
+		$page_offset = $end_lsn % $WAL_BLOCK_SIZE;
+	}
+
+	# Calibrate our message size so that we can creep closer to the end of
+	# the page 8 bytes at a time.
+	my $message_size = $WAL_BLOCK_SIZE - 80;
+	while ($page_offset <= $WAL_BLOCK_SIZE - $RECORD_HEADER_SIZE)
+	{
+		$end_lsn = emit_message($node, $message_size);
+
+		my $old_offset = $page_offset;
+		$page_offset = $end_lsn % $WAL_BLOCK_SIZE;
+
+		# Adjust the message size until each message advances the offset
+		# by exactly 8 bytes, a step small enough to land in the zone where
+		# a record header must be split across pages.
+		my $delta = $page_offset - $old_offset;
+		if ($delta > 8)
+		{
+			$message_size -= 8;
+		}
+		elsif ($delta <= 0)
+		{
+			$message_size += 8;
+		}
+	}
+	return $end_lsn;
+}
+
+# Set up a new node.  The configuration chosen here minimizes the number
+# of arbitrary records that could get generated in a cluster.  Enlarging
+# checkpoint_timeout avoids noise from checkpoint activity.  wal_level
+# set to "minimal" avoids random standby snapshot records.  Autovacuum
+# could also trigger randomly, generating random WAL activity of its own.
+my $node = PostgreSQL::Test::Cluster->new("node");
+$node->init;
+$node->append_conf(
+	'postgresql.conf',
+	q[wal_level = minimal
+autovacuum = off
+checkpoint_timeout = '30min'
+]);
+$node->start;
+$node->safe_psql('postgres', "CREATE TABLE t AS SELECT 42");
+
+$WAL_SEGMENT_SIZE = get_int_setting($node, 'wal_segment_size');
+$WAL_BLOCK_SIZE = get_int_setting($node, 'wal_block_size');
+$TLI = $node->safe_psql('postgres',
+	"SELECT timeline_id FROM pg_control_checkpoint();");
+
+my $end_lsn;
+my $prev_lsn;
+
+###########################################################################
+note "Single-page end-of-WAL detection";
+###########################################################################
+
+# xl_tot_len is 0 (a common case, we hit trailing zeroes).
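+# Nothing needs to be written for this case: after the immediate shutdown,
+# the unused bytes of the freshly created segment are still zero-filled, so
+# replay reads xl_tot_len == 0.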
+$end_lsn = advance_out_of_record_splitting_zone($node); +$node->stop('immediate'); +my $log_size = -s $node->logfile; +$node->start; +ok( $node->log_contains( + "invalid record length at .*: wanted 24, got 0", $log_size + ), + "xl_tot_len zero"); + +# xl_tot_len is < 24 (presumably recycled garbage). +$end_lsn = advance_out_of_record_splitting_zone($node); +$node->stop('immediate'); +write_wal($node, $TLI, $end_lsn, build_record_header(23)); +$log_size = -s $node->logfile; +$node->start; +ok( $node->log_contains( + "invalid record length at .*: wanted 24, got 23", + $log_size), + "xl_tot_len short"); + +# Need more pages, but xl_prev check fails first. +$end_lsn = advance_out_of_record_splitting_zone($node); +$node->stop('immediate'); +write_wal($node, $TLI, $end_lsn, + build_record_header(2 * 1024 * 1024 * 1024, 0, 0xdeadbeef)); +$log_size = -s $node->logfile; +$node->start; +ok( $node->log_contains( + "record with incorrect prev-link 0/DEADBEEF at .*", $log_size), + "xl_prev bad"); + +# xl_crc check fails. +advance_out_of_record_splitting_zone($node); +$end_lsn = emit_message($node, 10); +$node->stop('immediate'); +# Corrupt a byte in that record, breaking its CRC. +write_wal($node, $TLI, $end_lsn - 8, '!'); +$log_size = -s $node->logfile; +$node->start; +ok( $node->log_contains( + "incorrect resource manager data checksum in record at .*", $log_size + ), + "xl_crc bad"); + + +########################################################################### +note "Multi-page end-of-WAL detection, header is not split"; +########################################################################### + +# This series of tests requires a valid xl_prev set in the record header +# written to WAL. + +# Good xl_prev, we hit zero page next (zero magic). +$prev_lsn = advance_out_of_record_splitting_zone($node); +$end_lsn = emit_message($node, 0); +$node->stop('immediate'); +write_wal($node, $TLI, $end_lsn, + build_record_header(2 * 1024 * 1024 * 1024, 0, $prev_lsn)); +$log_size = -s $node->logfile; +$node->start; +ok($node->log_contains("invalid magic number 0000 ", $log_size), + "xlp_magic zero"); + +# Good xl_prev, we hit garbage page next (bad magic). +$prev_lsn = advance_out_of_record_splitting_zone($node); +$end_lsn = emit_message($node, 0); +$node->stop('immediate'); +write_wal($node, $TLI, $end_lsn, + build_record_header(2 * 1024 * 1024 * 1024, 0, $prev_lsn)); +write_wal( + $node, $TLI, + start_of_next_page($end_lsn), + build_page_header(0xcafe, 0, 1, 0)); +$log_size = -s $node->logfile; +$node->start; +ok($node->log_contains("invalid magic number CAFE ", $log_size), + "xlp_magic bad"); + +# Good xl_prev, we hit typical recycled page (good xlp_magic, bad +# xlp_pageaddr). +$prev_lsn = advance_out_of_record_splitting_zone($node); +$end_lsn = emit_message($node, 0); +$node->stop('immediate'); +write_wal($node, $TLI, $end_lsn, + build_record_header(2 * 1024 * 1024 * 1024, 0, $prev_lsn)); +write_wal( + $node, $TLI, + start_of_next_page($end_lsn), + build_page_header($XLP_PAGE_MAGIC, 0, 1, 0xbaaaaaad)); +$log_size = -s $node->logfile; +$node->start; +ok( $node->log_contains( + "unexpected pageaddr 0/BAAAAAAD ", $log_size), + "xlp_pageaddr bad"); + +# Good xl_prev, xlp_magic, xlp_pageaddr, but bogus xlp_info. 
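+# (0x1234 sets bits outside the known XLP_* flags, so the page header
+# validation fails before the record is even considered.)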
+$prev_lsn = advance_out_of_record_splitting_zone($node);
+$end_lsn = emit_message($node, 0);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 42, $prev_lsn));
+write_wal(
+	$node, $TLI,
+	start_of_next_page($end_lsn),
+	build_page_header(
+		$XLP_PAGE_MAGIC, 0x1234, 1, start_of_next_page($end_lsn)));
+$log_size = -s $node->logfile;
+$node->start;
+ok($node->log_contains("invalid info bits 1234 ", $log_size),
+	"xlp_info bad");
+
+# Good xl_prev, xlp_magic, xlp_pageaddr, but xlp_info doesn't mention
+# continuation record.
+$prev_lsn = advance_out_of_record_splitting_zone($node);
+$end_lsn = emit_message($node, 0);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 42, $prev_lsn));
+write_wal(
+	$node, $TLI,
+	start_of_next_page($end_lsn),
+	build_page_header($XLP_PAGE_MAGIC, 0, 1, start_of_next_page($end_lsn)));
+$log_size = -s $node->logfile;
+$node->start;
+ok($node->log_contains("there is no contrecord flag at .*", $log_size),
+	"xlp_info lacks XLP_FIRST_IS_CONTRECORD");
+
+# Good xl_prev, xlp_magic, xlp_pageaddr and xlp_info, but xlp_rem_len doesn't
+# add up.
+$prev_lsn = advance_out_of_record_splitting_zone($node);
+$end_lsn = emit_message($node, 0);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 42, $prev_lsn));
+write_wal(
+	$node, $TLI,
+	start_of_next_page($end_lsn),
+	build_page_header(
+		$XLP_PAGE_MAGIC, $XLP_FIRST_IS_CONTRECORD,
+		1, start_of_next_page($end_lsn),
+		123456));
+$log_size = -s $node->logfile;
+$node->start;
+ok( $node->log_contains(
+		"invalid contrecord length 123456 at .*", $log_size),
+	"xlp_rem_len bad");
+
+
+###########################################################################
+note "Multi-page, but header is split, so page checks are done first";
+###########################################################################
+
+# xl_prev is bad and xl_tot_len is too big, but we'll check xlp_magic first.
+$end_lsn = advance_to_record_splitting_zone($node);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 0, 0xdeadbeef));
+$log_size = -s $node->logfile;
+$node->start;
+ok($node->log_contains("invalid magic number 0000 ", $log_size),
+	"xlp_magic zero (split record header)");
+
+# And we'll also check xlp_pageaddr before any header checks.
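+# (0xbaaaaaad stands in for the address such a page would have carried in a
+# previous life, as on a recycled-but-not-overwritten segment.)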
+$end_lsn = advance_to_record_splitting_zone($node);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 0, 0xdeadbeef));
+write_wal(
+	$node, $TLI,
+	start_of_next_page($end_lsn),
+	build_page_header(
+		$XLP_PAGE_MAGIC, $XLP_FIRST_IS_CONTRECORD, 1, 0xbaaaaaad));
+$log_size = -s $node->logfile;
+$node->start;
+ok( $node->log_contains(
+		"unexpected pageaddr 0/BAAAAAAD ", $log_size),
+	"xlp_pageaddr bad (split record header)");
+
+# We'll also discover that xlp_rem_len doesn't add up before any
+# header checks.
+$end_lsn = advance_to_record_splitting_zone($node);
+$node->stop('immediate');
+write_wal($node, $TLI, $end_lsn,
+	build_record_header(2 * 1024 * 1024 * 1024, 0, 0xdeadbeef));
+write_wal(
+	$node, $TLI,
+	start_of_next_page($end_lsn),
+	build_page_header(
+		$XLP_PAGE_MAGIC, $XLP_FIRST_IS_CONTRECORD,
+		1, start_of_next_page($end_lsn),
+		123456));
+$log_size = -s $node->logfile;
+$node->start;
+ok( $node->log_contains(
+		"invalid contrecord length 123456 at .*", $log_size),
+	"xlp_rem_len bad (split record header)");
+
+done_testing();
-- 
2.40.1