From 936bfed4ab60f010a7578771e8d57ea32a54a892 Mon Sep 17 00:00:00 2001
Message-ID: <936bfed4ab60f010a7578771e8d57ea32a54a892.1778237699.git.james.locke.uk@gmail.com>
In-Reply-To: <cover.1778237699.git.james.locke.uk@gmail.com>
References: <CAA-aLv6sYZ5XnuYrytTjxZumBh3KrdyMRmasxHfgaKf-HJrNpw@mail.gmail.com>
	<cover.1778237699.git.james.locke.uk@gmail.com>
From: James Lock <james.locke.uk@gmail.com>
Date: Thu, 7 May 2026 14:11:01 +0100
Subject: [POC PATCH 5/5] Add tests for COMPACT

Three layers of test coverage for the new command:

  - src/test/regress/sql/compact.sql exercises COMPACT end-to-end,
    verifying that COMPACT preserves row counts, packs the surviving
    rows onto low-numbered pages, accepts the options-list form (with
    ANALYZE), and rejects unknown options.  The test asserts the layout
    invariant (rows end up on low pages) rather than the size invariant
    (relation truncated), because truncation depends on OldestXmin,
    which can be held back by a concurrent test in a parallel slot.

  - src/test/isolation/specs/compact.spec covers concurrency edges:
    a row held by SELECT FOR UPDATE in another session, a row being
    UPDATEd by another session, rows held by one or two FOR KEY SHARE
    lockers (the two-locker case forming a multixact), and a
    REPEATABLE READ reader observing a stable count across an
    intervening COMPACT.

  - contrib/pg_compact_test is a small contrib module exposing two
    SQL-callable helpers, pg_test_compact_buffer() and
    pg_test_relocate_tuple(), so the lower-level primitives
    (RelationGetSpecificBufferForTuple and heap_relocate) can be
    smoke-tested in isolation from the orchestrator.  This is meant
    primarily as a development aid; reviewers may prefer to drop it.

The contrib unit test, the main regression test, and the isolation
spec all exercise distinct layers of the implementation, so a
regression in any one of heap_relocate, lazy_compact_heap, or
ExecCompact will be caught without needing to run the full feature.
---
 contrib/Makefile                              |   1 +
 contrib/meson.build                           |   1 +
 contrib/pg_compact_test/Makefile              |  23 ++
 .../expected/pg_compact_test.out              | 116 +++++++++
 contrib/pg_compact_test/meson.build           |  34 +++
 .../pg_compact_test/pg_compact_test--1.0.sql  |  18 ++
 contrib/pg_compact_test/pg_compact_test.c     | 235 ++++++++++++++++++
 .../pg_compact_test/pg_compact_test.control   |   5 +
 .../pg_compact_test/sql/pg_compact_test.sql   |  93 +++++++
 src/test/isolation/expected/compact.out       | 113 +++++++++
 src/test/isolation/isolation_schedule         |   1 +
 src/test/isolation/specs/compact.spec         |  75 ++++++
 src/test/regress/expected/compact.out         |  61 +++++
 src/test/regress/parallel_schedule            |   2 +-
 src/test/regress/sql/compact.sql              |  46 ++++
 15 files changed, 823 insertions(+), 1 deletion(-)
 create mode 100644 contrib/pg_compact_test/Makefile
 create mode 100644 contrib/pg_compact_test/expected/pg_compact_test.out
 create mode 100644 contrib/pg_compact_test/meson.build
 create mode 100644 contrib/pg_compact_test/pg_compact_test--1.0.sql
 create mode 100644 contrib/pg_compact_test/pg_compact_test.c
 create mode 100644 contrib/pg_compact_test/pg_compact_test.control
 create mode 100644 contrib/pg_compact_test/sql/pg_compact_test.sql
 create mode 100644 src/test/isolation/expected/compact.out
 create mode 100644 src/test/isolation/specs/compact.spec
 create mode 100644 src/test/regress/expected/compact.out
 create mode 100644 src/test/regress/sql/compact.sql

diff --git a/contrib/Makefile b/contrib/Makefile
index 7d91fe77db3..34305f32bcc 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -31,6 +31,7 @@ SUBDIRS = \
 		pageinspect	\
 		passwordcheck	\
 		pg_buffercache	\
+		pg_compact_test	\
 		pg_freespacemap \
 		pg_logicalinspect \
 		pg_overexplain \
diff --git a/contrib/meson.build b/contrib/meson.build
index ebb7f83d8c5..c207602d0eb 100644
--- a/contrib/meson.build
+++ b/contrib/meson.build
@@ -44,6 +44,7 @@ subdir('oid2name')
 subdir('pageinspect')
 subdir('passwordcheck')
 subdir('pg_buffercache')
+subdir('pg_compact_test')
 subdir('pgcrypto')
 subdir('pg_freespacemap')
 subdir('pg_logicalinspect')
diff --git a/contrib/pg_compact_test/Makefile b/contrib/pg_compact_test/Makefile
new file mode 100644
index 00000000000..d4393b02ae4
--- /dev/null
+++ b/contrib/pg_compact_test/Makefile
@@ -0,0 +1,23 @@
+# contrib/pg_compact_test/Makefile
+
+MODULE_big = pg_compact_test
+OBJS = \
+	$(WIN32RES) \
+	pg_compact_test.o
+
+EXTENSION = pg_compact_test
+DATA = pg_compact_test--1.0.sql
+PGFILEDESC = "pg_compact_test - exercise heap compaction primitives"
+
+REGRESS = pg_compact_test
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pg_compact_test
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/pg_compact_test/expected/pg_compact_test.out b/contrib/pg_compact_test/expected/pg_compact_test.out
new file mode 100644
index 00000000000..8c1570ab4cc
--- /dev/null
+++ b/contrib/pg_compact_test/expected/pg_compact_test.out
@@ -0,0 +1,116 @@
+CREATE EXTENSION pg_compact_test;
+--
+-- Exercise RelationGetSpecificBufferForTuple via pg_test_compact_buffer().
+--
+-- A fillfactor=50 table guarantees ample free space on every page, so a
+-- small tuple_size is certain to fit regardless of the compiled block size.
+--
+CREATE TABLE compacttest (id int, payload text) WITH (fillfactor = 50);
+INSERT INTO compacttest
+SELECT g, repeat('x', 200) FROM generate_series(1, 1000) g;
+-- Sanity: at least a few pages so source_block (5) is in range.
+SELECT pg_relation_size('compacttest') / current_setting('block_size')::int >= 6
+  AS has_enough_pages;
+ has_enough_pages 
+------------------
+ t
+(1 row)
+
+-- Success path: a small tuple fits on a half-empty page.
+SELECT pg_test_compact_buffer('compacttest', 0, 5, 200) AS small_tuple_fits;
+ small_tuple_fits 
+------------------
+ t
+(1 row)
+
+--
+-- Argument validation.  We use sqlstate verbosity for the messages whose
+-- wording embeds runtime-dependent values like the relation page count.
+--
+-- Same source/target page is rejected.
+SELECT pg_test_compact_buffer('compacttest', 3, 3, 200);
+ERROR:  target_block must differ from source_block
+-- Non-positive tuple_size is rejected.
+SELECT pg_test_compact_buffer('compacttest', 0, 5, 0);
+ERROR:  tuple_size must be positive
+-- Out-of-range block.  Use sqlstate verbosity because the message embeds
+-- the table's current page count.
+\set VERBOSITY sqlstate
+SELECT pg_test_compact_buffer('compacttest', 0, 999999, 200);
+ERROR:  22023
+\set VERBOSITY default
+-- Wrong relkind: an index is not a heap.
+CREATE INDEX compacttest_idx ON compacttest (id);
+SELECT pg_test_compact_buffer('compacttest_idx', 0, 1, 100);
+ERROR:  relation "compacttest_idx" is of wrong relation kind
+DROP TABLE compacttest;
+--
+-- Exercise heap_relocate end-to-end via pg_test_relocate_tuple(): move a
+-- tuple from a tail page to page 0 and verify heap-level invariants.
+--
+-- Note: this wrapper does not update indexes (that responsibility belongs
+-- to the VACUUM (COMPACT) orchestrator), so the test uses only TID-based
+-- queries and sequential scans, which see the relocated row directly.
+--
+CREATE TABLE relocate_test (id int, payload text) WITH (fillfactor = 50);
+INSERT INTO relocate_test
+SELECT g, repeat('y', 200) FROM generate_series(1, 1000) g;
+-- Snapshot the row that lives physically last in the table.
+SELECT ctid AS rel_source_ctid
+  FROM relocate_test
+  ORDER BY ctid DESC
+  LIMIT 1
+\gset
+-- Source row really is past page 0.
+SELECT (:'rel_source_ctid'::tid::text NOT LIKE '(0,%)') AS source_past_page_0;
+ source_past_page_0 
+--------------------
+ t
+(1 row)
+
+-- Relocate the tuple onto page 0.  Returns the new TID (NULL = skipped).
+SELECT pg_test_relocate_tuple('relocate_test',
+                              :'rel_source_ctid'::tid,
+                              0) AS rel_new_tid
+\gset
+-- New TID is on page 0.
+SELECT :'rel_new_tid' LIKE '(0,%)' AS new_tid_on_page_0;
+ new_tid_on_page_0 
+-------------------
+ t
+(1 row)
+
+-- The TID changed.
+SELECT (:'rel_source_ctid'::tid <> :'rel_new_tid'::tid) AS tid_changed;
+ tid_changed 
+-------------
+ t
+(1 row)
+
+-- Row count is unchanged via sequential scan: no duplicates, no rows lost.
+SELECT count(*) AS row_count FROM relocate_test;
+ row_count 
+-----------
+      1000
+(1 row)
+
+-- The relocated row is reachable at the new TID exactly once.
+SELECT count(*) AS at_new_tid
+  FROM relocate_test
+  WHERE ctid = :'rel_new_tid'::tid;
+ at_new_tid 
+------------
+          1
+(1 row)
+
+-- The old TID no longer holds a live row.
+SELECT count(*) AS old_tid_dead
+  FROM relocate_test
+  WHERE ctid = :'rel_source_ctid'::tid;
+ old_tid_dead 
+--------------
+            0
+(1 row)
+
+DROP TABLE relocate_test;
+DROP EXTENSION pg_compact_test;
diff --git a/contrib/pg_compact_test/meson.build b/contrib/pg_compact_test/meson.build
new file mode 100644
index 00000000000..e1d449af927
--- /dev/null
+++ b/contrib/pg_compact_test/meson.build
@@ -0,0 +1,34 @@
+# Copyright (c) 2026, PostgreSQL Global Development Group
+
+pg_compact_test_sources = files(
+  'pg_compact_test.c',
+)
+
+if host_system == 'windows'
+  pg_compact_test_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+    '--NAME', 'pg_compact_test',
+    '--FILEDESC', 'pg_compact_test - exercise heap compaction primitives',])
+endif
+
+pg_compact_test = shared_module('pg_compact_test',
+  pg_compact_test_sources,
+  kwargs: contrib_mod_args,
+)
+contrib_targets += pg_compact_test
+
+install_data(
+  'pg_compact_test--1.0.sql',
+  'pg_compact_test.control',
+  kwargs: contrib_data_args,
+)
+
+tests += {
+  'name': 'pg_compact_test',
+  'sd': meson.current_source_dir(),
+  'bd': meson.current_build_dir(),
+  'regress': {
+    'sql': [
+      'pg_compact_test',
+    ],
+  },
+}
diff --git a/contrib/pg_compact_test/pg_compact_test--1.0.sql b/contrib/pg_compact_test/pg_compact_test--1.0.sql
new file mode 100644
index 00000000000..64f98eccd6e
--- /dev/null
+++ b/contrib/pg_compact_test/pg_compact_test--1.0.sql
@@ -0,0 +1,18 @@
+/* contrib/pg_compact_test/pg_compact_test--1.0.sql */
+
+\echo Use "CREATE EXTENSION pg_compact_test" to load this file. \quit
+
+CREATE FUNCTION pg_test_compact_buffer(rel regclass,
+                                       target_block bigint,
+                                       source_block bigint,
+                                       tuple_size integer)
+RETURNS boolean
+AS 'MODULE_PATHNAME', 'pg_test_compact_buffer'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION pg_test_relocate_tuple(rel regclass,
+                                       source_tid tid,
+                                       target_block bigint)
+RETURNS tid
+AS 'MODULE_PATHNAME', 'pg_test_relocate_tuple'
+LANGUAGE C STRICT;
diff --git a/contrib/pg_compact_test/pg_compact_test.c b/contrib/pg_compact_test/pg_compact_test.c
new file mode 100644
index 00000000000..244c12432a1
--- /dev/null
+++ b/contrib/pg_compact_test/pg_compact_test.c
@@ -0,0 +1,235 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_compact_test.c
+ *	  Exercise heap-compaction primitives in isolation.
+ *
+ *	  Exposes pg_test_compact_buffer(), a wrapper around
+ *	  RelationGetSpecificBufferForTuple(), and pg_test_relocate_tuple(),
+ *	  a wrapper around heap_relocate(), for smoke-testing in isolation.
+ *
+ *	  contrib/pg_compact_test/pg_compact_test.c
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/hio.h"
+#include "access/htup_details.h"
+#include "catalog/pg_am.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "storage/bufmgr.h"
+#include "utils/rel.h"
+
+PG_MODULE_MAGIC_EXT(
+					.name = "pg_compact_test",
+					.version = PG_VERSION
+);
+
+PG_FUNCTION_INFO_V1(pg_test_compact_buffer);
+PG_FUNCTION_INFO_V1(pg_test_relocate_tuple);
+
+/*
+ * pg_test_compact_buffer(rel regclass,
+ *                        target_block bigint,
+ *                        source_block bigint,
+ *                        tuple_size integer) RETURNS boolean
+ *
+ *	Pins source_block (without taking its content lock), then calls
+ *	RelationGetSpecificBufferForTuple() asking for tuple_size bytes of
+ *	free space on target_block, with source_block as the "other" buffer.
+ *
+ *	Returns true if the call succeeded (the target page had room and is
+ *	now exclusively locked); the locks and pins are released before
+ *	returning.  Returns false if the target page did not have room (the
+ *	function returned InvalidBuffer).
+ *
+ *	This is intended for testing only.  It does not actually move any
+ *	tuple -- the goal is to exercise the buffer-targeting and locking
+ *	logic.  The relation is opened with RowExclusiveLock to match the
+ *	lock level VACUUM (COMPACT) will eventually use when reusing this
+ *	primitive.
+ */
+Datum
+pg_test_compact_buffer(PG_FUNCTION_ARGS)
+{
+	Oid			relid = PG_GETARG_OID(0);
+	int64		target_block_arg = PG_GETARG_INT64(1);
+	int64		source_block_arg = PG_GETARG_INT64(2);
+	int32		tuple_size = PG_GETARG_INT32(3);
+	Relation	rel;
+	BlockNumber nblocks;
+	BlockNumber target_block;
+	BlockNumber source_block;
+	Buffer		source_buffer;
+	Buffer		target_buffer;
+	Buffer		vmbuffer = InvalidBuffer;
+	Buffer		vmbuffer_other = InvalidBuffer;
+
+	if (target_block_arg < 0 || target_block_arg > MaxBlockNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("target_block out of range: %lld",
+						(long long) target_block_arg)));
+	if (source_block_arg < 0 || source_block_arg > MaxBlockNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("source_block out of range: %lld",
+						(long long) source_block_arg)));
+	if (tuple_size <= 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("tuple_size must be positive")));
+
+	target_block = (BlockNumber) target_block_arg;
+	source_block = (BlockNumber) source_block_arg;
+
+	if (target_block == source_block)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("target_block must differ from source_block")));
+
+	rel = relation_open(relid, RowExclusiveLock);
+
+	if (!RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("relation \"%s\" is of wrong relation kind",
+						RelationGetRelationName(rel))));
+
+	if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("pg_test_compact_buffer requires the heap table AM")));
+
+	nblocks = RelationGetNumberOfBlocks(rel);
+	if (target_block >= nblocks || source_block >= nblocks)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("block number out of range for relation \"%s\" (size %u)",
+						RelationGetRelationName(rel), nblocks)));
+
+	/*
+	 * Pin the source buffer.  RelationGetSpecificBufferForTuple expects an
+	 * unlocked buffer here and arranges the buffer-lock acquisition order
+	 * itself.
+	 */
+	source_buffer = ReadBuffer(rel, source_block);
+
+	target_buffer = RelationGetSpecificBufferForTuple(rel,
+													  (Size) tuple_size,
+													  target_block,
+													  source_buffer,
+													  &vmbuffer,
+													  &vmbuffer_other);
+
+	if (target_buffer == InvalidBuffer)
+	{
+		/*
+		 * The function already released the lock and pin on target_buffer.
+		 * For source_buffer (passed as otherBuffer) we never held a content
+		 * lock on entry, and any lock taken internally was released on this
+		 * no-space path; the pin we acquired above is still ours to drop.
+		 */
+		ReleaseBuffer(source_buffer);
+		if (vmbuffer != InvalidBuffer)
+			ReleaseBuffer(vmbuffer);
+		if (vmbuffer_other != InvalidBuffer)
+			ReleaseBuffer(vmbuffer_other);
+		relation_close(rel, RowExclusiveLock);
+		PG_RETURN_BOOL(false);
+	}
+
+	/*
+	 * Success: both buffers are exclusive-locked and pinned.  Release
+	 * everything without modifying the pages.
+	 */
+	UnlockReleaseBuffer(target_buffer);
+	LockBuffer(source_buffer, BUFFER_LOCK_UNLOCK);
+	ReleaseBuffer(source_buffer);
+	if (vmbuffer != InvalidBuffer)
+		ReleaseBuffer(vmbuffer);
+	if (vmbuffer_other != InvalidBuffer)
+		ReleaseBuffer(vmbuffer_other);
+
+	relation_close(rel, RowExclusiveLock);
+	PG_RETURN_BOOL(true);
+}
+
+/*
+ * pg_test_relocate_tuple(rel regclass, source_tid tid, target_block bigint)
+ *     RETURNS tid
+ *
+ *	Smoke-test wrapper for heap_relocate.  Returns the new TID of the
+ *	relocated tuple, or NULL if heap_relocate refused (concurrent activity,
+ *	target page lost its room, etc.).
+ *
+ *	Note: this wrapper deliberately does NOT update indexes.  The post-
+ *	relocation index-fixup pass requires full executor scaffolding
+ *	(snapshot, ranges, ECxt) that a contrib function cannot reasonably
+ *	construct by hand; that responsibility belongs to the VACUUM (COMPACT)
+ *	orchestrator, which has the right context.  Until that lands, callers
+ *	can verify the relocation via TID-based queries (WHERE ctid = ...) and
+ *	sequential scans, but index-based lookups on the relocated row will
+ *	miss it.
+ */
+Datum
+pg_test_relocate_tuple(PG_FUNCTION_ARGS)
+{
+	Oid			relid = PG_GETARG_OID(0);
+	ItemPointer source_tid = (ItemPointer) PG_GETARG_POINTER(1);
+	int64		target_block_arg = PG_GETARG_INT64(2);
+	BlockNumber target_block;
+	BlockNumber nblocks;
+	Relation	rel;
+	TM_Result	result;
+	TM_FailureData tmfd;
+	TU_UpdateIndexes update_indexes;
+	ItemPointerData new_tid;
+	ItemPointer ret_tid;
+
+	if (target_block_arg < 0 || target_block_arg > MaxBlockNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("target_block out of range: %lld",
+						(long long) target_block_arg)));
+
+	target_block = (BlockNumber) target_block_arg;
+
+	rel = relation_open(relid, RowExclusiveLock);
+
+	if (!RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("relation \"%s\" is of wrong relation kind",
+						RelationGetRelationName(rel))));
+
+	if (rel->rd_rel->relam != HEAP_TABLE_AM_OID)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("pg_test_relocate_tuple requires the heap table AM")));
+
+	nblocks = RelationGetNumberOfBlocks(rel);
+	if (target_block >= nblocks ||
+		ItemPointerGetBlockNumber(source_tid) >= nblocks)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("block number out of range for relation \"%s\" (size %u)",
+						RelationGetRelationName(rel), nblocks)));
+
+	result = heap_relocate(rel, source_tid, target_block,
+						   GetCurrentCommandId(true),
+						   &tmfd, &update_indexes, &new_tid);
+
+	if (result != TM_Ok)
+	{
+		relation_close(rel, RowExclusiveLock);
+		PG_RETURN_NULL();
+	}
+
+	relation_close(rel, RowExclusiveLock);
+
+	ret_tid = (ItemPointer) palloc(sizeof(ItemPointerData));
+	ItemPointerCopy(&new_tid, ret_tid);
+	PG_RETURN_POINTER(ret_tid);
+}
diff --git a/contrib/pg_compact_test/pg_compact_test.control b/contrib/pg_compact_test/pg_compact_test.control
new file mode 100644
index 00000000000..500a96fb127
--- /dev/null
+++ b/contrib/pg_compact_test/pg_compact_test.control
@@ -0,0 +1,5 @@
+# pg_compact_test extension
+comment = 'exercise heap compaction primitives (buffer targeting and tuple relocation)'
+default_version = '1.0'
+module_pathname = '$libdir/pg_compact_test'
+relocatable = true
diff --git a/contrib/pg_compact_test/sql/pg_compact_test.sql b/contrib/pg_compact_test/sql/pg_compact_test.sql
new file mode 100644
index 00000000000..0a538ce8ed3
--- /dev/null
+++ b/contrib/pg_compact_test/sql/pg_compact_test.sql
@@ -0,0 +1,93 @@
+CREATE EXTENSION pg_compact_test;
+
+--
+-- Exercise RelationGetSpecificBufferForTuple via pg_test_compact_buffer().
+--
+-- A fillfactor=50 table guarantees ample free space on every page, so a
+-- small tuple_size is certain to fit regardless of the compiled block size.
+--
+
+CREATE TABLE compacttest (id int, payload text) WITH (fillfactor = 50);
+INSERT INTO compacttest
+SELECT g, repeat('x', 200) FROM generate_series(1, 1000) g;
+
+-- Sanity: at least a few pages so source_block (5) is in range.
+SELECT pg_relation_size('compacttest') / current_setting('block_size')::int >= 6
+  AS has_enough_pages;
+
+-- Success path: a small tuple fits on a half-empty page.
+SELECT pg_test_compact_buffer('compacttest', 0, 5, 200) AS small_tuple_fits;
+
+--
+-- Argument validation.  We use sqlstate verbosity for the messages whose
+-- wording embeds runtime-dependent values like the relation page count.
+--
+
+-- Same source/target page is rejected.
+SELECT pg_test_compact_buffer('compacttest', 3, 3, 200);
+
+-- Non-positive tuple_size is rejected.
+SELECT pg_test_compact_buffer('compacttest', 0, 5, 0);
+
+-- Out-of-range block.  Use sqlstate verbosity because the message embeds
+-- the table's current page count.
+\set VERBOSITY sqlstate
+SELECT pg_test_compact_buffer('compacttest', 0, 999999, 200);
+\set VERBOSITY default
+
+-- Wrong relkind: an index is not a heap.
+CREATE INDEX compacttest_idx ON compacttest (id);
+SELECT pg_test_compact_buffer('compacttest_idx', 0, 1, 100);
+
+DROP TABLE compacttest;
+
+--
+-- Exercise heap_relocate end-to-end via pg_test_relocate_tuple(): move a
+-- tuple from a tail page to page 0 and verify heap-level invariants.
+--
+-- Note: this wrapper does not update indexes (that responsibility belongs
+-- to the VACUUM (COMPACT) orchestrator), so the test uses only TID-based
+-- queries and sequential scans, which see the relocated row directly.
+--
+
+CREATE TABLE relocate_test (id int, payload text) WITH (fillfactor = 50);
+INSERT INTO relocate_test
+SELECT g, repeat('y', 200) FROM generate_series(1, 1000) g;
+
+-- Snapshot the row that lives physically last in the table.
+SELECT ctid AS rel_source_ctid
+  FROM relocate_test
+  ORDER BY ctid DESC
+  LIMIT 1
+\gset
+
+-- Source row really is past page 0.
+SELECT (:'rel_source_ctid'::tid::text NOT LIKE '(0,%)') AS source_past_page_0;
+
+-- Relocate the tuple onto page 0.  Returns the new TID (NULL = skipped).
+SELECT pg_test_relocate_tuple('relocate_test',
+                              :'rel_source_ctid'::tid,
+                              0) AS rel_new_tid
+\gset
+
+-- New TID is on page 0.
+SELECT :'rel_new_tid' LIKE '(0,%)' AS new_tid_on_page_0;
+
+-- The TID changed.
+SELECT (:'rel_source_ctid'::tid <> :'rel_new_tid'::tid) AS tid_changed;
+
+-- Row count is unchanged via sequential scan: no duplicates, no rows lost.
+SELECT count(*) AS row_count FROM relocate_test;
+
+-- The relocated row is reachable at the new TID exactly once.
+SELECT count(*) AS at_new_tid
+  FROM relocate_test
+  WHERE ctid = :'rel_new_tid'::tid;
+
+-- The old TID no longer holds a live row.
+SELECT count(*) AS old_tid_dead
+  FROM relocate_test
+  WHERE ctid = :'rel_source_ctid'::tid;
+
+DROP TABLE relocate_test;
+DROP EXTENSION pg_compact_test;
diff --git a/src/test/isolation/expected/compact.out b/src/test/isolation/expected/compact.out
new file mode 100644
index 00000000000..62fee442f5a
--- /dev/null
+++ b/src/test/isolation/expected/compact.out
@@ -0,0 +1,113 @@
+Parsed test spec with 4 sessions
+
+starting permutation: w_begin w_lock c_compact w_commit c_count c_target
+step w_begin: BEGIN;
+step w_lock: SELECT id FROM compact_iso WHERE id = 9500 FOR UPDATE;
+  id
+----
+9500
+(1 row)
+
+step c_compact: COMPACT compact_iso;
+step w_commit: COMMIT;
+step c_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step c_target: SELECT count(*) AS target_rows FROM compact_iso WHERE id = 9500;
+target_rows
+-----------
+          1
+(1 row)
+
+
+starting permutation: w_begin w_update c_compact w_commit c_count c_target
+step w_begin: BEGIN;
+step w_update: UPDATE compact_iso SET payload = 'changed' WHERE id = 9500;
+step c_compact: COMPACT compact_iso;
+step w_commit: COMMIT;
+step c_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step c_target: SELECT count(*) AS target_rows FROM compact_iso WHERE id = 9500;
+target_rows
+-----------
+          1
+(1 row)
+
+
+starting permutation: w_begin w_keyshare c_compact w_commit c_count c_target
+step w_begin: BEGIN;
+step w_keyshare: SELECT id FROM compact_iso WHERE id = 9500 FOR KEY SHARE;
+  id
+----
+9500
+(1 row)
+
+step c_compact: COMPACT compact_iso;
+step w_commit: COMMIT;
+step c_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step c_target: SELECT count(*) AS target_rows FROM compact_iso WHERE id = 9500;
+target_rows
+-----------
+          1
+(1 row)
+
+
+starting permutation: w_begin w2_begin w_keyshare w2_keyshare c_compact w_commit w2_commit c_count c_target
+step w_begin: BEGIN;
+step w2_begin: BEGIN;
+step w_keyshare: SELECT id FROM compact_iso WHERE id = 9500 FOR KEY SHARE;
+  id
+----
+9500
+(1 row)
+
+step w2_keyshare: SELECT id FROM compact_iso WHERE id = 9500 FOR KEY SHARE;
+  id
+----
+9500
+(1 row)
+
+step c_compact: COMPACT compact_iso;
+step w_commit: COMMIT;
+step w2_commit: COMMIT;
+step c_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step c_target: SELECT count(*) AS target_rows FROM compact_iso WHERE id = 9500;
+target_rows
+-----------
+          1
+(1 row)
+
+
+starting permutation: r_begin r_count c_compact r_count r_commit
+step r_begin: BEGIN ISOLATION LEVEL REPEATABLE READ;
+step r_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step c_compact: COMPACT compact_iso;
+step r_count: SELECT count(*) AS rows FROM compact_iso;
+rows
+----
+1000
+(1 row)
+
+step r_commit: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 1578ba191c8..c971aecfd43 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -119,6 +119,7 @@ test: plpgsql-toast
 test: cluster-conflict
 test: cluster-conflict-partition
 test: cluster-toast-value-reuse
+test: compact
 test: truncate-conflict
 test: serializable-parallel
 test: serializable-parallel-2
diff --git a/src/test/isolation/specs/compact.spec b/src/test/isolation/specs/compact.spec
new file mode 100644
index 00000000000..a4cff4d64af
--- /dev/null
+++ b/src/test/isolation/specs/compact.spec
@@ -0,0 +1,75 @@
+# Tests for COMPACT interactions with concurrent transactions.
+#
+# heap_relocate skips tuples it cannot safely move (live conflicting
+# updaters, exclusive lockers) but accepts tuples that have only
+# key-share lockers, preserving those lockers on the new tuple's xmax.
+# These permutations check that:
+#
+#  - A tuple held under FOR UPDATE in another session is skipped.
+#  - A tuple being updated by another session is skipped.
+#  - A tuple held under a single FOR KEY SHARE lock is relocated
+#    successfully (the locker is preserved).
+#  - A tuple held under multiple key-share locks (which form a
+#    multixact) is similarly relocated successfully.
+#  - A REPEATABLE READ reader sees a stable count across a COMPACT.
+
+setup
+{
+    CREATE TABLE compact_iso (id int, payload text) WITH (fillfactor = 100);
+    INSERT INTO compact_iso
+        SELECT g, repeat('z', 200) FROM generate_series(1, 10000) g;
+    DELETE FROM compact_iso WHERE id <= 9000;
+    -- COMPACT's first internal vacuum pass will prune the dead tuples;
+    -- a separate VACUUM here would be ideal but cannot run inside the
+    -- isolation tester's setup transaction.
+}
+
+teardown
+{
+    DROP TABLE compact_iso;
+}
+
+session "w"
+step "w_begin"  { BEGIN; }
+step "w_lock"   { SELECT id FROM compact_iso WHERE id = 9500 FOR UPDATE; }
+step "w_update" { UPDATE compact_iso SET payload = 'changed' WHERE id = 9500; }
+step "w_keyshare" { SELECT id FROM compact_iso WHERE id = 9500 FOR KEY SHARE; }
+step "w_commit" { COMMIT; }
+
+# Second key-share locker; combined with w's FOR KEY SHARE this produces
+# a multixact on the target tuple, exercising heap_relocate's multixact
+# branch.
+session "w2"
+step "w2_begin"    { BEGIN; }
+step "w2_keyshare" { SELECT id FROM compact_iso WHERE id = 9500 FOR KEY SHARE; }
+step "w2_commit"   { COMMIT; }
+
+session "c"
+step "c_compact" { COMPACT compact_iso; }
+step "c_count"   { SELECT count(*) AS rows FROM compact_iso; }
+step "c_target"  { SELECT count(*) AS target_rows FROM compact_iso WHERE id = 9500; }
+
+session "r"
+step "r_begin"  { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step "r_count"  { SELECT count(*) AS rows FROM compact_iso; }
+step "r_commit" { COMMIT; }
+
+
+# FOR UPDATE: row is skipped by the relocation pass; COMPACT still
+# completes and the row remains intact.
+permutation "w_begin" "w_lock" "c_compact" "w_commit" "c_count" "c_target"
+
+# Concurrent UPDATE: row is skipped, count preserved after writer commits.
+permutation "w_begin" "w_update" "c_compact" "w_commit" "c_count" "c_target"
+
+# Single FOR KEY SHARE: row is relocated, locker is preserved on the
+# new tuple, no row loss after the locker commits.
+permutation "w_begin" "w_keyshare" "c_compact" "w_commit" "c_count" "c_target"
+
+# Two FOR KEY SHARE lockers (multixact): row is relocated, both lockers
+# are preserved, no row loss after both commit.
+permutation "w_begin" "w2_begin" "w_keyshare" "w2_keyshare" "c_compact"
+            "w_commit" "w2_commit" "c_count" "c_target"
+
+# REPEATABLE READ reader is unaffected by an intervening COMPACT.
+permutation "r_begin" "r_count" "c_compact" "r_count" "r_commit"
diff --git a/src/test/regress/expected/compact.out b/src/test/regress/expected/compact.out
new file mode 100644
index 00000000000..403cb7a120e
--- /dev/null
+++ b/src/test/regress/expected/compact.out
@@ -0,0 +1,61 @@
+--
+-- COMPACT
+--
+-- A bloated table whose live rows have drifted to the physical end.
+-- After VACUUM, dead tuples are gone but the trailing pages still hold the
+-- 1000 surviving rows.  COMPACT relocates them to low-numbered pages and
+-- truncates the trailing empties in a single command.
+CREATE TABLE compact_bloat (id int, payload text) WITH (fillfactor = 100);
+INSERT INTO compact_bloat
+    SELECT g, repeat('z', 200) FROM generate_series(1, 10000) g;
+DELETE FROM compact_bloat WHERE id <= 9000;
+VACUUM compact_bloat;
+-- Snapshot of the physical-block range of live rows before compaction.
+SELECT (min((ctid::text::point)[0]::int) > 64) AS rows_at_high_pages
+  FROM compact_bloat;
+ rows_at_high_pages 
+--------------------
+ t
+(1 row)
+
+-- Run COMPACT.
+COMPACT compact_bloat;
+-- Row count is preserved.
+SELECT count(*) AS row_count FROM compact_bloat;
+ row_count 
+-----------
+      1000
+(1 row)
+
+-- All surviving rows are now physically near the start.  This is the
+-- defining outcome of the relocation pass and does not depend on whether
+-- the truncation step was able to reclaim trailing pages (which in turn
+-- depends on OldestXmin and so can be held back by concurrent activity
+-- in a parallel test slot).
+SELECT (max((ctid::text::point)[0]::int) < 64) AS rows_packed_to_low_pages
+  FROM compact_bloat;
+ rows_packed_to_low_pages 
+--------------------------
+ t
+(1 row)
+
+DROP TABLE compact_bloat;
+-- Options-list form, with ANALYZE.
+CREATE TABLE compact_opts (id int, payload text) WITH (fillfactor = 100);
+INSERT INTO compact_opts
+    SELECT g, repeat('q', 200) FROM generate_series(1, 2000) g;
+DELETE FROM compact_opts WHERE id <= 1800;
+VACUUM compact_opts;
+COMPACT (ANALYZE) compact_opts;
+SELECT count(*) FROM compact_opts;
+ count 
+-------
+   200
+(1 row)
+
+DROP TABLE compact_opts;
+-- Argument validation.
+COMPACT (FOO) some_table;
+ERROR:  unrecognized COMPACT option "foo"
+LINE 1: COMPACT (FOO) some_table;
+                 ^
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 8fa0a6c47fb..e0ec02559df 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -128,7 +128,7 @@ test: partition_merge partition_split partition_join partition_prune reloptions
 # ----------
 # Another group of parallel tests (compression)
 # ----------
-test: compression compression_lz4 compression_pglz cluster
+test: compression compression_lz4 compression_pglz cluster compact
 
 # event_trigger depends on create_am and cannot run concurrently with
 # any test that runs DDL
diff --git a/src/test/regress/sql/compact.sql b/src/test/regress/sql/compact.sql
new file mode 100644
index 00000000000..4c09c05c9d2
--- /dev/null
+++ b/src/test/regress/sql/compact.sql
@@ -0,0 +1,46 @@
+--
+-- COMPACT
+--
+
+-- A bloated table whose live rows have drifted to the physical end.
+-- After VACUUM, dead tuples are gone but the trailing pages still hold the
+-- 1000 surviving rows.  COMPACT relocates them to low-numbered pages and
+-- truncates the trailing empties in a single command.
+CREATE TABLE compact_bloat (id int, payload text) WITH (fillfactor = 100);
+INSERT INTO compact_bloat
+    SELECT g, repeat('z', 200) FROM generate_series(1, 10000) g;
+DELETE FROM compact_bloat WHERE id <= 9000;
+VACUUM compact_bloat;
+
+-- Snapshot of the physical-block range of live rows before compaction.
+SELECT (min((ctid::text::point)[0]::int) > 64) AS rows_at_high_pages
+  FROM compact_bloat;
+
+-- Run COMPACT.
+COMPACT compact_bloat;
+
+-- Row count is preserved.
+SELECT count(*) AS row_count FROM compact_bloat;
+
+-- All surviving rows are now physically near the start.  This is the
+-- defining outcome of the relocation pass and does not depend on whether
+-- the truncation step was able to reclaim trailing pages (which in turn
+-- depends on OldestXmin and so can be held back by concurrent activity
+-- in a parallel test slot).
+SELECT (max((ctid::text::point)[0]::int) < 64) AS rows_packed_to_low_pages
+  FROM compact_bloat;
+
+DROP TABLE compact_bloat;
+
+-- Options-list form, with ANALYZE.
+CREATE TABLE compact_opts (id int, payload text) WITH (fillfactor = 100);
+INSERT INTO compact_opts
+    SELECT g, repeat('q', 200) FROM generate_series(1, 2000) g;
+DELETE FROM compact_opts WHERE id <= 1800;
+VACUUM compact_opts;
+COMPACT (ANALYZE) compact_opts;
+SELECT count(*) FROM compact_opts;
+DROP TABLE compact_opts;
+
+-- Argument validation.
+COMPACT (FOO) some_table;
-- 
2.47.3

