From ac42f990b85ae4034f16acf9929ce28e18ec2088 Mon Sep 17 00:00:00 2001
From: Andres Freund <andres@anarazel.de>
Date: Tue, 7 Jan 2025 14:42:12 -0500
Subject: [PATCH v2.3 11/30] aio: Core AIO implementation

At this point nothing can use AIO - this commit does not include any
implementation of AIO targets / callbacks. That will come in later commits.

Todo:
- lots of cleanup
---
 src/include/storage/aio.h                     | 301 ++++++
 src/include/storage/aio_internal.h            | 295 ++++++
 src/include/storage/aio_types.h               | 115 +++
 src/include/utils/resowner.h                  |   5 +
 src/backend/access/transam/xact.c             |   9 +
 src/backend/storage/aio/Makefile              |   4 +
 src/backend/storage/aio/aio.c                 | 904 ++++++++++++++++++
 src/backend/storage/aio/aio_callback.c        | 280 ++++++
 src/backend/storage/aio/aio_init.c            | 186 ++++
 src/backend/storage/aio/aio_io.c              | 175 ++++
 src/backend/storage/aio/aio_target.c          | 108 +++
 src/backend/storage/aio/meson.build           |   4 +
 src/backend/storage/aio/method_sync.c         |  47 +
 .../utils/activity/wait_event_names.txt       |   3 +
 src/backend/utils/resowner/resowner.c         |  30 +
 src/tools/pgindent/typedefs.list              |  21 +
 16 files changed, 2487 insertions(+)
 create mode 100644 src/include/storage/aio_internal.h
 create mode 100644 src/include/storage/aio_types.h
 create mode 100644 src/backend/storage/aio/aio_callback.c
 create mode 100644 src/backend/storage/aio/aio_io.c
 create mode 100644 src/backend/storage/aio/aio_target.c
 create mode 100644 src/backend/storage/aio/method_sync.c

diff --git a/src/include/storage/aio.h b/src/include/storage/aio.h
index 0e3fadac543..ffd382593d0 100644
--- a/src/include/storage/aio.h
+++ b/src/include/storage/aio.h
@@ -14,6 +14,9 @@
 #ifndef AIO_H
 #define AIO_H
 
+#include "storage/aio_types.h"
+#include "storage/procnumber.h"
+
 
 
 /* Enum for io_method GUC. */
@@ -26,9 +29,307 @@ typedef enum IoMethod
 #define DEFAULT_IO_METHOD IOMETHOD_SYNC
 
 
+/*
+ * Flags for an IO that can be set with pgaio_io_set_flag().
+ */
+typedef enum PgAioHandleFlags
+{
+	/*
+	 * The IO references backend local memory.
+	 *
+	 * This needs to be set on an IO whenever the IO references process-local
+	 * memory. Some IO methods do not support executing IO that references
+	 * process local memory and thus need to fall back to executing IO
+	 * synchronously for IOs with this flag set.
+	 *
+	 * Required for correctness.
+	 */
+	PGAIO_HF_REFERENCES_LOCAL = 1 << 1,
+
+	/*
+	 * Hint that IO will be executed synchronously.
+	 *
+	 * This can make it a bit cheaper to execute synchronous IO via the AIO
+	 * interface, to avoid needing an AIO and non-AIO version of code.
+	 *
+	 * Advantageous to set, if applicable, but not required for correctness.
+	 */
+	PGAIO_HF_SYNCHRONOUS = 1 << 0,
+
+	/*
+	 * IO is using buffered IO, used to control heuristic in some IO methods.
+	 *
+	 * Advantageous to set, if applicable, but not required for correctness.
+	 */
+	PGAIO_HF_BUFFERED = 1 << 2,
+} PgAioHandleFlags;
+
+/*
+ * The IO operations supported by the AIO subsystem.
+ *
+ * This could be in aio_internal.h, as it is not publicly referenced, but
+ * PgAioOpData currently *does* need to be public, therefore keeping this
+ * public seems to make sense.
+ */
+typedef enum PgAioOp
+{
+	/* intentionally the zero value, to help catch zeroed memory etc */
+	PGAIO_OP_INVALID = 0,
+
+	PGAIO_OP_READV,
+	PGAIO_OP_WRITEV,
+
+	/**
+	 * In the near term we'll need at least:
+	 * - fsync / fdatasync
+	 * - flush_range
+	 *
+	 * Eventually we'll additionally want at least:
+	 * - send
+	 * - recv
+	 * - accept
+	 **/
+} PgAioOp;
+
+#define PGAIO_OP_COUNT	(PGAIO_OP_WRITEV + 1)
+
+
+/*
+ * On what is IO being performed.
+ *
+ * PgAioTargetID specific behaviour should be implemented in
+ * aio_target.c.
+ */
+typedef enum PgAioTargetID
+{
+	/* intentionally the zero value, to help catch zeroed memory etc */
+	PGAIO_TID_INVALID = 0,
+} PgAioTargetID;
+
+#define PGAIO_TID_COUNT (PGAIO_TID_INVALID + 1)
+
+
+/*
+ * Data necessary to support IO operations (see PgAioOp).
+ *
+ * NB: Note that the FDs in here may *not* be relied upon for re-issuing
+ * requests (e.g. for partial reads/writes) - the FD might be from another
+ * process, or closed since. That's not a problem for IOs waiting to be issued
+ * only because the queue is flushed when closing an FD.
+ */
+typedef union
+{
+	struct
+	{
+		int			fd;
+		uint16		iov_length;
+		uint64		offset;
+	}			read;
+
+	struct
+	{
+		int			fd;
+		uint16		iov_length;
+		uint64		offset;
+	}			write;
+} PgAioOpData;
+
+
+/*
+ * Information about the object that IO is executed on. Mostly callbacks that
+ * operate on PgAioTargetData.
+ */
+typedef struct PgAioTargetInfo
+{
+	void		(*reopen) (PgAioHandle *ioh);
+
+	char	   *(*describe_identity) (const PgAioTargetData *sd);
+
+	const char *name;
+} PgAioTargetInfo;
+
+
+/*
+ * IDs for callbacks that can be registered on an IO.
+ *
+ * Callbacks are identified by an ID rather than a function pointer. There are
+ * two main reasons:
+ *
+ * 1) Memory within PgAioHandle is precious, due to the number of PgAioHandle
+ *    structs in pre-allocated shared memory.
+ *
+ * 2) Due to EXEC_BACKEND function pointers are not necessarily stable between
+ *    different backends, therefore function pointers cannot directly be in
+ *    shared memory.
+ *
+ * Without 2), we could fairly easily allow adding new callbacks, by filling an
+ * ID->pointer mapping table on demand. In the presence of 2) that's still
+ * doable, but harder, because every process has to re-register the pointers
+ * so that a local ID->"backend local pointer" mapping can be maintained.
+ */
+typedef enum PgAioHandleCallbackID
+{
+	PGAIO_HCB_INVALID,
+} PgAioHandleCallbackID;
+
+
+typedef void (*PgAioHandleCallbackStage) (PgAioHandle *ioh);
+typedef PgAioResult (*PgAioHandleCallbackComplete) (PgAioHandle *ioh, PgAioResult prior_result);
+typedef void (*PgAioHandleCallbackReport) (PgAioResult result, const PgAioTargetData *target_data, int elevel);
+
+typedef struct PgAioHandleCallbacks
+{
+	/*
+	 * Prepare resources affected by the IO for execution. This could e.g.
+	 * include moving ownership of buffer pins to the AIO subsystem.
+	 */
+	PgAioHandleCallbackStage stage;
+
+	/*
+	 * Update the state of resources affected by the IO to reflect completion
+	 * of the IO. This could e.g. include updating shared buffer state to
+	 * signal the IO has finished.
+	 *
+	 * The _shared suffix indicates that this is executed by the backend that
+	 * completed the IO, which may or may not be the backend that issued the
+	 * IO.  Obviously the callback thus can only modify resources in shared
+	 * memory.
+	 *
+	 * The latest registered callback is called first. This allows
+	 * higher-level code to register callbacks that can rely on callbacks
+	 * registered by lower-level code to already have been executed.
+	 *
+	 * NB: This is called in a critical section. Errors can be signalled by
+	 * the callback's return value, it's the responsibility of the IO's issuer
+	 * to react appropriately.
+	 */
+	PgAioHandleCallbackComplete complete_shared;
+
+	/*
+	 * Like complete_shared, except called in the issuing backend.
+	 *
+	 * This variant of the completion callback is useful when backend-local
+	 * state has to be updated to reflect the IO's completion. E.g. a
+	 * temporary buffer's BufferDesc isn't accessible in complete_shared.
+	 *
+	 * Local callbacks are only called after complete_shared for all
+	 * registered callbacks has been called.
+	 */
+	PgAioHandleCallbackComplete complete_local;
+
+	/*
+	 * Report the result of an IO operation. This is e.g. used to raise an
+	 * error after an IO failed at the appropriate time (i.e. not when the IO
+	 * failed, but under control of the code that issued the IO).
+	 */
+	PgAioHandleCallbackReport report;
+} PgAioHandleCallbacks;
+
+
+
+/*
+ * How many callbacks can be registered for one IO handle. Currently we only
+ * need two, but it's not hard to imagine needing a few more.
+ */
+#define PGAIO_HANDLE_MAX_CALLBACKS	4
+
+
+
+/* AIO API */
+
+
+/* --------------------------------------------------------------------------------
+ * IO Handles
+ * --------------------------------------------------------------------------------
+ */
+
+/* functions in aio.c */
+struct ResourceOwnerData;
+extern PgAioHandle *pgaio_io_acquire(struct ResourceOwnerData *resowner, PgAioReturn *ret);
+extern PgAioHandle *pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret);
+
+extern void pgaio_io_release(PgAioHandle *ioh);
+struct dlist_node;
+extern void pgaio_io_release_resowner(struct dlist_node *ioh_node, bool on_error);
+
+extern void pgaio_io_set_flag(PgAioHandle *ioh, PgAioHandleFlags flag);
+
+extern int	pgaio_io_get_id(PgAioHandle *ioh);
+extern ProcNumber pgaio_io_get_owner(PgAioHandle *ioh);
+
+extern void pgaio_io_get_wref(PgAioHandle *ioh, PgAioWaitRef *iow);
+
+/* functions in aio_io.c */
+struct iovec;
+extern int	pgaio_io_get_iovec(PgAioHandle *ioh, struct iovec **iov);
+
+extern PgAioOpData *pgaio_io_get_op_data(PgAioHandle *ioh);
+
+extern void pgaio_io_prep_readv(PgAioHandle *ioh,
+								int fd, int iovcnt, uint64 offset);
+extern void pgaio_io_prep_writev(PgAioHandle *ioh,
+								 int fd, int iovcnt, uint64 offset);
+
+/* functions in aio_target.c */
+extern void pgaio_io_set_target(PgAioHandle *ioh, PgAioTargetID targetid);
+extern bool pgaio_io_has_target(PgAioHandle *ioh);
+extern PgAioTargetData *pgaio_io_get_target_data(PgAioHandle *ioh);
+extern char *pgaio_io_get_target_description(PgAioHandle *ioh);
+
+/* functions in aio_callback.c */
+extern void pgaio_io_register_callbacks(PgAioHandle *ioh, PgAioHandleCallbackID cbid);
+extern void pgaio_io_set_handle_data_64(PgAioHandle *ioh, uint64 *data, uint8 len);
+extern void pgaio_io_set_handle_data_32(PgAioHandle *ioh, uint32 *data, uint8 len);
+extern uint64 *pgaio_io_get_handle_data(PgAioHandle *ioh, uint8 *len);
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO Wait References
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_wref_clear(PgAioWaitRef *iow);
+extern bool pgaio_wref_valid(PgAioWaitRef *iow);
+extern int	pgaio_wref_get_id(PgAioWaitRef *iow);
+
+extern void pgaio_wref_wait(PgAioWaitRef *iow);
+extern bool pgaio_wref_check_done(PgAioWaitRef *iow);
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO Result
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_result_report(PgAioResult result, const PgAioTargetData *target_data,
+								int elevel);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Actions on multiple IOs.
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_submit_staged(void);
+extern bool pgaio_have_staged(void);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Other
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_closing_fd(int fd);
+extern void pgaio_at_xact_end(bool is_subxact, bool is_commit);
+extern void pgaio_at_error(void);
 extern void assign_io_method(int newval, void *extra);
 
 
+
 /* GUCs */
 extern PGDLLIMPORT int io_method;
 extern PGDLLIMPORT int io_max_concurrency;
diff --git a/src/include/storage/aio_internal.h b/src/include/storage/aio_internal.h
new file mode 100644
index 00000000000..174d365f9c0
--- /dev/null
+++ b/src/include/storage/aio_internal.h
@@ -0,0 +1,295 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_internal.h
+ *    AIO related declarations that should only be used by the AIO subsystem
+ *    internally.
+ *
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/aio_internal.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef AIO_INTERNAL_H
+#define AIO_INTERNAL_H
+
+
+#include "lib/ilist.h"
+#include "port/pg_iovec.h"
+#include "storage/aio.h"
+#include "storage/condition_variable.h"
+
+
+/* AFIXME */
+#define PGAIO_SUBMIT_BATCH_SIZE 32
+
+
+
+typedef enum PgAioHandleState
+{
+	/* not in use */
+	PGAIO_HS_IDLE = 0,
+
+	/* returned by pgaio_io_acquire() */
+	PGAIO_HS_HANDED_OUT,
+
+	/* pgaio_io_prep_*() has been called, but IO hasn't been submitted yet */
+	PGAIO_HS_DEFINED,
+
+	/* target's stage() callback has been called, ready to be submitted */
+	PGAIO_HS_STAGED,
+
+	/* IO has been submitted and is being executed */
+	PGAIO_HS_SUBMITTED,
+
+	/* IO finished, but result has not yet been processed */
+	PGAIO_HS_COMPLETED_IO,
+
+	/* IO completed, shared completion has been called */
+	PGAIO_HS_COMPLETED_SHARED,
+
+	/* IO completed, local completion has been called */
+	PGAIO_HS_COMPLETED_LOCAL,
+} PgAioHandleState;
+
+
+struct ResourceOwnerData;
+
+/* typedef is in public header */
+struct PgAioHandle
+{
+	/* all state updates should go through pgaio_io_update_state() */
+	PgAioHandleState state:8;
+
+	/* what are we operating on */
+	PgAioTargetID target:8;
+
+	/* which IO operation */
+	PgAioOp		op:8;
+
+	/* bitfield of PgAioHandleFlags */
+	uint8		flags;
+
+	uint8		num_shared_callbacks;
+
+	/* using the proper type here would use more space */
+	uint8		shared_callbacks[PGAIO_HANDLE_MAX_CALLBACKS];
+
+	/*
+	 * Length of data associated with handle using
+	 * pgaio_io_set_handle_data_*().
+	 */
+	uint8		handle_data_len;
+
+	/* XXX: could be optimized out with some pointer math */
+	int32		owner_procno;
+
+	/* raw result of the IO operation */
+	int32		result;
+
+	/*
+	 * Index into PgAioCtl->iovecs and PgAioCtl->handle_data.
+	 *
+	 * At the moment there's no need to differentiate between the two, but
+	 * that won't necessarily stay that way.
+	 */
+	uint32		iovec_off;
+
+	/**
+	 * In which list the handle is registered, depends on the state:
+	 * - IDLE, in per-backend list
+	 * - HANDED_OUT - not in a list
+	 * - DEFINED - in per-backend staged list
+	 * - STAGED - in per-backend staged list
+	 * - SUBMITTED - in issuer's in_flight list
+	 * - COMPLETED_IO - in issuer's in_flight list
+	 * - COMPLETED_SHARED - in issuer's in_flight list
+	 **/
+	dlist_node	node;
+
+	struct ResourceOwnerData *resowner;
+	dlist_node	resowner_node;
+
+	/* incremented every time the IO handle is reused */
+	uint64		generation;
+
+	ConditionVariable cv;
+
+	/* result of shared callback, passed to issuer callback */
+	PgAioResult distilled_result;
+
+	PgAioReturn *report_return;
+
+	PgAioOpData op_data;
+
+	/*
+	 * Data necessary to identify the object undergoing IO to higher-level
+	 * code. Needs to be sufficient to allow another backend to reopen the
+	 * file.
+	 */
+	PgAioTargetData target_data;
+};
+
+
+typedef struct PgAioBackend
+{
+	/* index into PgAioCtl->io_handles */
+	uint32		io_handle_off;
+
+	/* IO Handles that currently are not used */
+	dclist_head idle_ios;
+
+	/*
+	 * Only one IO may be returned by pgaio_io_acquire()/pgaio_io_acquire_nb()
+	 * without having been either defined (by actually associating it with IO)
+	 * or released (with pgaio_io_release()). This restriction is necessary
+	 * to guarantee that we always can acquire an IO. ->handed_out_io is used
+	 * to enforce that rule.
+	 */
+	PgAioHandle *handed_out_io;
+
+	/*
+	 * IOs that are defined, but not yet submitted.
+	 */
+	uint16		num_staged_ios;
+	PgAioHandle *staged_ios[PGAIO_SUBMIT_BATCH_SIZE];
+
+	/*
+	 * List of in-flight IOs. Also contains IOs that aren't strictly speaking
+	 * in-flight anymore, but have been waited-for and completed by another
+	 * backend. Once this backend sees such an IO it'll be reclaimed.
+	 *
+	 * The list is ordered by submission time, with more recently submitted
+	 * IOs being appended at the end.
+	 */
+	dclist_head in_flight_ios;
+} PgAioBackend;
+
+
+typedef struct PgAioCtl
+{
+	int			backend_state_count;
+	PgAioBackend *backend_state;
+
+	/*
+	 * Array of iovec structs. Each iovec is owned by a specific backend. The
+	 * allocation is in PgAioCtl to allow the maximum number of iovecs for
+	 * individual IOs to be configurable with PGC_POSTMASTER GUC.
+	 */
+	uint64		iovec_count;
+	struct iovec *iovecs;
+
+	/*
+	 * For, e.g., an IO covering multiple buffers in shared / temp buffers, we
+	 * need to get Buffer IDs during completion to be able to change the
+	 * BufferDesc state accordingly. This space can be used to store e.g.
+	 * Buffer IDs.  Note that the actual iovec might be shorter than this,
+	 * because we combine neighboring pages into one larger iovec entry.
+	 */
+	uint64	   *handle_data;
+
+	uint64		io_handle_count;
+	PgAioHandle *io_handles;
+} PgAioCtl;
+
+
+
+/*
+ * The set of callbacks that each IO method must implement.
+ *
+ * AFIXME: Document these.
+ */
+typedef struct IoMethodOps
+{
+	/* global initialization */
+	size_t		(*shmem_size) (void);
+	void		(*shmem_init) (bool first_time);
+
+	/* per-backend initialization */
+	void		(*init_backend) (void);
+
+	/* handling of IOs */
+	bool		(*needs_synchronous_execution) (PgAioHandle *ioh);
+	int			(*submit) (uint16 num_staged_ios, PgAioHandle **staged_ios);
+
+	void		(*wait_one) (PgAioHandle *ioh,
+							 uint64 ref_generation);
+} IoMethodOps;
+
+
+/* aio.c */
+extern bool pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state);
+extern void pgaio_io_stage(PgAioHandle *ioh, PgAioOp op);
+extern void pgaio_io_process_completion(PgAioHandle *ioh, int result);
+extern void pgaio_io_prepare_submit(PgAioHandle *ioh);
+extern bool pgaio_io_needs_synchronous_execution(PgAioHandle *ioh);
+extern const char *pgaio_io_get_state_name(PgAioHandle *ioh);
+extern void pgaio_shutdown(int code, Datum arg);
+
+/* aio_callback.c */
+extern void pgaio_io_call_stage(PgAioHandle *ioh);
+extern void pgaio_io_call_complete_shared(PgAioHandle *ioh);
+extern void pgaio_io_call_complete_local(PgAioHandle *ioh);
+
+/* aio_io.c */
+extern void pgaio_io_perform_synchronously(PgAioHandle *ioh);
+extern const char *pgaio_io_get_op_name(PgAioHandle *ioh);
+
+/* aio_target.c */
+extern bool pgaio_io_can_reopen(PgAioHandle *ioh);
+extern void pgaio_io_reopen(PgAioHandle *ioh);
+extern const char *pgaio_io_get_target_name(PgAioHandle *ioh);
+
+
+/*
+ * The AIO subsystem has fairly verbose debug logging support. This can be
+ * enabled/disabled at buildtime. The reason for this is that
+ * a) the verbosity can make debugging things on higher levels hard
+ * b) even if logging can be skipped due to elevel checks, it still causes a
+ *    measurable slowdown
+ */
+#define PGAIO_VERBOSE		1
+
+/*
+ * Simple ereport() wrapper that only logs if PGAIO_VERBOSE is defined.
+ *
+ * This intentionally still compiles the code, guarded by a constant if (0),
+ * if verbose logging is disabled, to make it less likely that debug logging
+ * is silently broken.
+ *
+ * The current definition requires passing at least one argument.
+ */
+#define pgaio_debug(elevel, msg, ...)  \
+	do { \
+		if (PGAIO_VERBOSE) \
+			ereport(elevel, \
+					errhidestmt(true), errhidecontext(true), \
+					errmsg_internal(msg, \
+									__VA_ARGS__)); \
+	} while(0)
+
+/*
+ * Simple ereport() wrapper. Note that the definition requires passing at
+ * least one argument.
+ */
+#define pgaio_debug_io(elevel, ioh, msg, ...)  \
+	pgaio_debug(elevel, "io %-10d|op %-5s|target %-4s|state %-16s: " msg, \
+				pgaio_io_get_id(ioh), \
+				pgaio_io_get_op_name(ioh), \
+				pgaio_io_get_target_name(ioh), \
+				pgaio_io_get_state_name(ioh), \
+				__VA_ARGS__)
+
+
+/* Declarations for the tables of function pointers exposed by each IO method. */
+extern PGDLLIMPORT const IoMethodOps pgaio_sync_ops;
+
+extern PGDLLIMPORT const IoMethodOps *pgaio_method_ops;
+extern PGDLLIMPORT PgAioCtl *pgaio_ctl;
+extern PGDLLIMPORT PgAioBackend *pgaio_my_backend;
+
+
+
+#endif							/* AIO_INTERNAL_H */
diff --git a/src/include/storage/aio_types.h b/src/include/storage/aio_types.h
new file mode 100644
index 00000000000..d2617139a25
--- /dev/null
+++ b/src/include/storage/aio_types.h
@@ -0,0 +1,115 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_types.h
+ *    AIO related types that are useful to include separately, to reduce the
+ *    "include burden".
+ *
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/aio_types.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef AIO_TYPES_H
+#define AIO_TYPES_H
+
+#include "storage/block.h"
+#include "storage/relfilelocator.h"
+
+
+typedef struct PgAioHandle PgAioHandle;
+
+/*
+ * A reference to an IO that can be used to wait for the IO (using
+ * pgaio_wref_wait()) to complete.
+ *
+ * These can be passed across process boundaries.
+ */
+typedef struct PgAioWaitRef
+{
+	/* internal ID identifying the specific PgAioHandle */
+	uint32		aio_index;
+
+	/*
+	 * IO handles are reused. To detect if a handle was reused, and thereby
+	 * avoid unnecessarily waiting for a newer IO, each time the handle is
+	 * reused a generation number is increased.
+	 *
+	 * To avoid requiring alignment sufficient for an int64, split the
+	 * generation into two.
+	 */
+	uint32		generation_upper;
+	uint32		generation_lower;
+} PgAioWaitRef;
+
+
+/*
+ * Information identifying what the IO is being performed on.
+ *
+ * This needs sufficient information to
+ *
+ * a) Reopen the file for the IO if the IO is executed in a context that
+ *    cannot use the FD provided initially (e.g. because the IO is executed in
+ *    a worker process).
+ *
+ * b) Describe the object the IO is performed on in log / error messages.
+ */
+typedef union PgAioTargetData
+{
+	/* just as an example placeholder for later */
+	struct
+	{
+		uint32		queue_id;
+	}			wal;
+} PgAioTargetData;
+
+
+/*
+ * The status of an AIO operation.
+ */
+typedef enum PgAioResultStatus
+{
+	ARS_UNKNOWN,				/* not yet completed / uninitialized */
+	ARS_OK,
+	ARS_PARTIAL,				/* did not fully succeed, but no error */
+	ARS_ERROR,
+} PgAioResultStatus;
+
+
+/*
+ * Result of IO operation, visible only to the initiator of IO.
+ */
+typedef struct PgAioResult
+{
+	/*
+	 * This is of type PgAioHandleCallbackID, but can't use a bitfield of an
+	 * enum, because some compilers treat enums as signed.
+	 */
+	uint32		id:8;
+
+	/* of type PgAioResultStatus, see above */
+	uint32		status:2;
+
+	/* meaning defined by callback->error */
+	uint32		error_data:22;
+
+	int32		result;
+} PgAioResult;
+
+
+/*
+ * Combination of PgAioResult with minimal metadata about the IO.
+ *
+ * Contains sufficient information to be able, in case the IO [partially]
+ * fails, to log/raise an error under control of the IO issuing code.
+ */
+typedef struct PgAioReturn
+{
+	PgAioResult result;
+	PgAioTargetData target_data;
+} PgAioReturn;
+
+
+#endif							/* AIO_TYPES_H */
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index e8d452ca7ee..aede4bfc820 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -164,4 +164,9 @@ struct LOCALLOCK;
 extern void ResourceOwnerRememberLock(ResourceOwner owner, struct LOCALLOCK *locallock);
 extern void ResourceOwnerForgetLock(ResourceOwner owner, struct LOCALLOCK *locallock);
 
+/* special support for AIO */
+struct dlist_node;
+extern void ResourceOwnerRememberAioHandle(ResourceOwner owner, struct dlist_node *ioh_node);
+extern void ResourceOwnerForgetAioHandle(ResourceOwner owner, struct dlist_node *ioh_node);
+
 #endif							/* RESOWNER_H */
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index d331ab90d78..a252c3a81b4 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -51,6 +51,7 @@
 #include "replication/origin.h"
 #include "replication/snapbuild.h"
 #include "replication/syncrep.h"
+#include "storage/aio.h"
 #include "storage/condition_variable.h"
 #include "storage/fd.h"
 #include "storage/lmgr.h"
@@ -2475,6 +2476,8 @@ CommitTransaction(void)
 	AtEOXact_LogicalRepWorkers(true);
 	pgstat_report_xact_timestamp(0);
 
+	pgaio_at_xact_end( /* is_subxact = */ false, /* is_commit = */ true);
+
 	ResourceOwnerDelete(TopTransactionResourceOwner);
 	s->curTransactionOwner = NULL;
 	CurTransactionResourceOwner = NULL;
@@ -2988,6 +2991,8 @@ AbortTransaction(void)
 		pgstat_report_xact_timestamp(0);
 	}
 
+	pgaio_at_xact_end( /* is_subxact = */ false, /* is_commit = */ false);
+
 	/*
 	 * State remains TRANS_ABORT until CleanupTransaction().
 	 */
@@ -5185,6 +5190,8 @@ CommitSubTransaction(void)
 	AtEOSubXact_PgStat(true, s->nestingLevel);
 	AtSubCommit_Snapshot(s->nestingLevel);
 
+	pgaio_at_xact_end( /* is_subxact = */ true, /* is_commit = */ true);
+
 	/*
 	 * We need to restore the upper transaction's read-only state, in case the
 	 * upper is read-write while the child is read-only; GUC will incorrectly
@@ -5351,6 +5358,8 @@ AbortSubTransaction(void)
 		AtSubAbort_Snapshot(s->nestingLevel);
 	}
 
+	pgaio_at_xact_end( /* is_subxact = */ true, /* is_commit = */ false);
+
 	/*
 	 * Restore the upper transaction's read-only state, too.  This should be
 	 * redundant with GUC's cleanup but we may as well do it for consistency
diff --git a/src/backend/storage/aio/Makefile b/src/backend/storage/aio/Makefile
index eaeaeeee8e3..89f821ea7e1 100644
--- a/src/backend/storage/aio/Makefile
+++ b/src/backend/storage/aio/Makefile
@@ -10,7 +10,11 @@ include $(top_builddir)/src/Makefile.global
 
 OBJS = \
 	aio.o \
+	aio_callback.o \
 	aio_init.o \
+	aio_io.o \
+	aio_target.o \
+	method_sync.o \
 	read_stream.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index f68cbc2b3f4..cefa888884c 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -3,6 +3,28 @@
  * aio.c
  *    AIO - Core Logic
  *
+ * For documentation about how AIO works on a higher level, including a
+ * schematic example, see README.md.
+ *
+ *
+ * AIO is a complicated subsystem. To keep things navigable it is split across
+ * a number of files:
+ *
+ * - method_*.c - different ways of executing AIO (e.g. worker process)
+ *
+ * - aio_target.c - IO on different kinds of targets
+ *
+ * - aio_io.c - method-independent code for specific IO ops (e.g. readv)
+ *
+ * - aio_callback.c - callbacks at IO operation lifecycle events
+ *
+ * - aio_init.c - per-server and per-backend initialization
+ *
+ * - aio.c - all other topics
+ *
+ * - read_stream.c - helper for reading buffered relation data
+ *
+ *
  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
@@ -14,8 +36,22 @@
 
 #include "postgres.h"
 
+#include "miscadmin.h"
+#include "port/atomics.h"
 #include "storage/aio.h"
+#include "storage/aio_internal.h"
 #include "utils/guc.h"
+#include "utils/resowner.h"
+#include "utils/wait_event_types.h"
+
+
+static inline void pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state);
+static void pgaio_io_reclaim(PgAioHandle *ioh);
+static void pgaio_io_resowner_register(PgAioHandle *ioh);
+static void pgaio_io_wait_for_free(void);
+static PgAioHandle *pgaio_io_from_wref(PgAioWaitRef *iow, uint64 *ref_generation);
+static const char *pgaio_io_state_get_name(PgAioHandleState s);
+static void pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation);
 
 
 /* Options for io_method. */
@@ -28,9 +64,877 @@ const struct config_enum_entry io_method_options[] = {
 int			io_method = DEFAULT_IO_METHOD;
 int			io_max_concurrency = -1;
 
+/* global control for AIO */
+PgAioCtl   *pgaio_ctl;
 
+/* current backend's per-backend state */
+PgAioBackend *pgaio_my_backend;
+
+
+static const IoMethodOps *const pgaio_method_ops_table[] = {
+	[IOMETHOD_SYNC] = &pgaio_sync_ops,
+};
+
+/* callbacks for the configured io_method, set by assign_io_method */
+const IoMethodOps *pgaio_method_ops;
+
+
+
+/* --------------------------------------------------------------------------------
+ * Public Functions related to PgAioHandle
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Acquire an AioHandle, waiting for IO completion if necessary.
+ *
+ * Each backend can only have one AIO handle that has been "handed out"
+ * to code, but not yet submitted or released. This restriction is necessary
+ * to ensure that it is possible for code to wait for an unused handle by
+ * waiting for in-flight IO to complete. There is a limited number of handles
+ * in each backend, if multiple handles could be handed out without being
+ * submitted, waiting for all in-flight IO to complete would not guarantee
+ * that handles free up.
+ *
+ * It is cheap to acquire an IO handle, unless all handles are in use. In that
+ * case this function waits for the oldest IO to complete. In case that is not
+ * desirable, see pgaio_io_acquire_nb().
+ *
+ * If a handle was acquired but then does not turn out to be needed,
+ * e.g. because pgaio_io_acquire() is called before starting an IO in a
+ * critical section, the handle needs to be released with pgaio_io_release().
+ *
+ *
+ * To react to the completion of the IO as soon as it is known to have
+ * completed, callbacks can be registered with pgaio_io_register_callbacks().
+ *
+ * To actually execute IO using the returned handle, the pgaio_io_prep_*()
+ * family of functions is used. In many cases the pgaio_io_prep_*() call will
+ * not be done directly by code that acquired the handle, but by lower level
+ * code that gets passed the handle. E.g. if code in bufmgr.c wants to perform
+ * AIO, it typically will pass the handle to smgr.c, which will pass it on to
+ * md.c, on to fd.c, which then finally calls pgaio_io_prep_*().  This
+ * forwarding allows the various layers to react to the IO's completion by
+ * registering callbacks. These callbacks in turn can translate a lower
+ * layer's result into a result understandable by a higher layer.
+ *
+ * Once pgaio_io_prep_*() is called, the IO may be in the process of being
+ * executed and might even complete before the functions return. That is,
+ * however, not guaranteed, to allow IO submission to be batched. To guarantee
+ * IO submission pgaio_submit_staged() needs to be called.
+ *
+ * After pgaio_io_prep_*() the AioHandle is "consumed" and may not be
+ * referenced by the IO issuing code. To e.g. wait for IO, references to the
+ * IO can be established with pgaio_io_get_wref() *before* pgaio_io_prep_*()
+ * is called.  pgaio_wref_wait() can be used to wait for the IO to complete.
+ *
+ *
+ * To know if the IO [partially] succeeded or failed, a PgAioReturn * can be
+ * passed to pgaio_io_acquire(). Once the issuing backend has called
+ * pgaio_wref_wait(), the PgAioReturn contains information about whether the
+ * operation succeeded and details about the first failure, if any. The error
+ * can be raised / logged with pgaio_result_report().
+ *
+ * The lifetime of the memory pointed to by *ret needs to be at least as long
+ * as the passed in resowner. If the resowner releases resources before the IO
+ * completes (typically due to an error), the reference to *ret will be
+ * cleared. In case of resowner cleanup *ret will not be updated with the
+ * results of the IO operation.
+ */
+PgAioHandle *
+pgaio_io_acquire(struct ResourceOwnerData *resowner, PgAioReturn *ret)
+{
+	PgAioHandle *h;
+
+	while (true)
+	{
+		h = pgaio_io_acquire_nb(resowner, ret);
+
+		if (h != NULL)
+			return h;
+
+		/*
+		 * Evidently all handles by this backend are in use. Just wait for
+		 * some to complete.
+		 */
+		pgaio_io_wait_for_free();
+	}
+}
+
+/*
+ * Acquire an AioHandle, returning NULL if no handles are free.
+ *
+ * See pgaio_io_acquire(). The only difference is that this function will return
+ * NULL if there are no idle handles, instead of blocking.
+ */
+PgAioHandle *
+pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
+{
+	if (pgaio_my_backend->num_staged_ios >= PGAIO_SUBMIT_BATCH_SIZE)
+	{
+		Assert(pgaio_my_backend->num_staged_ios == PGAIO_SUBMIT_BATCH_SIZE);
+		pgaio_submit_staged();
+	}
+
+	if (pgaio_my_backend->handed_out_io)
+	{
+		ereport(ERROR,
+				errmsg("API violation: Only one IO can be handed out"));
+	}
+
+	if (!dclist_is_empty(&pgaio_my_backend->idle_ios))
+	{
+		dlist_node *ion = dclist_pop_head_node(&pgaio_my_backend->idle_ios);
+		PgAioHandle *ioh = dclist_container(PgAioHandle, node, ion);
+
+		Assert(ioh->state == PGAIO_HS_IDLE);
+		Assert(ioh->owner_procno == MyProcNumber);
+
+		pgaio_io_update_state(ioh, PGAIO_HS_HANDED_OUT);
+		pgaio_my_backend->handed_out_io = ioh;
+
+		if (resowner)
+			pgaio_io_resowner_register(ioh);
+
+		if (ret)
+		{
+			ioh->report_return = ret;
+			ret->result.status = ARS_UNKNOWN;
+		}
+
+		return ioh;
+	}
+
+	return NULL;
+}
+
+/*
+ * Release IO handle that turned out to not be required.
+ *
+ * See pgaio_io_acquire() for more details.
+ */
+void
+pgaio_io_release(PgAioHandle *ioh)
+{
+	if (ioh == pgaio_my_backend->handed_out_io)
+	{
+		Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+		Assert(ioh->resowner);
+
+		pgaio_my_backend->handed_out_io = NULL;
+		pgaio_io_reclaim(ioh);
+	}
+	else
+	{
+		elog(ERROR, "release in unexpected state");
+	}
+}
+
+/*
+ * Release IO handle during resource owner cleanup.
+ */
+void
+pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
+{
+	PgAioHandle *ioh = dlist_container(PgAioHandle, resowner_node, ioh_node);
+
+	Assert(ioh->resowner);
+
+	ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
+	ioh->resowner = NULL;
+
+	switch (ioh->state)
+	{
+		case PGAIO_HS_IDLE:
+			elog(ERROR, "unexpected");
+			break;
+		case PGAIO_HS_HANDED_OUT:
+			Assert(ioh == pgaio_my_backend->handed_out_io || pgaio_my_backend->handed_out_io == NULL);
+
+			if (ioh == pgaio_my_backend->handed_out_io)
+			{
+				pgaio_my_backend->handed_out_io = NULL;
+				if (!on_error)
+					elog(WARNING, "leaked AIO handle");
+			}
+
+			pgaio_io_reclaim(ioh);
+			break;
+		case PGAIO_HS_DEFINED:
+		case PGAIO_HS_STAGED:
+			/* XXX: Should we warn about this when is_commit? */
+			pgaio_submit_staged();
+			break;
+		case PGAIO_HS_SUBMITTED:
+		case PGAIO_HS_COMPLETED_IO:
+		case PGAIO_HS_COMPLETED_SHARED:
+		case PGAIO_HS_COMPLETED_LOCAL:
+			/* this is expected to happen */
+			break;
+	}
+
+	/*
+	 * Need to unregister the reporting of the IO's result, the memory it's
+	 * referencing likely has gone away.
+	 */
+	if (ioh->report_return)
+		ioh->report_return = NULL;
+}
+
+/*
+ * Add a [set of] flags to the IO.
+ *
+ * Note that this combines the passed-in flags with any already-set flags,
+ * rather than overwriting the flags with exactly the passed-in value. This
+ * is to allow multiple callsites to set flags.
+ */
+void
+pgaio_io_set_flag(PgAioHandle *ioh, PgAioHandleFlags flag)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+
+	ioh->flags |= flag;
+}
+
+int
+pgaio_io_get_id(PgAioHandle *ioh)
+{
+	Assert(ioh >= pgaio_ctl->io_handles &&
+		   ioh < (pgaio_ctl->io_handles + pgaio_ctl->io_handle_count));
+	return ioh - pgaio_ctl->io_handles;
+}
+
+ProcNumber
+pgaio_io_get_owner(PgAioHandle *ioh)
+{
+	return ioh->owner_procno;
+}
+
+void
+pgaio_io_get_wref(PgAioHandle *ioh, PgAioWaitRef *iow)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT ||
+		   ioh->state == PGAIO_HS_DEFINED ||
+		   ioh->state == PGAIO_HS_STAGED);
+	Assert(ioh->generation != 0);
+
+	iow->aio_index = ioh - pgaio_ctl->io_handles;
+	iow->generation_upper = (uint32) (ioh->generation >> 32);
+	iow->generation_lower = (uint32) ioh->generation;
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Internal Functions related to PgAioHandle
+ * --------------------------------------------------------------------------------
+ */
+
+static inline void
+pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state)
+{
+	pgaio_debug_io(DEBUG4, ioh,
+				   "updating state to %s",
+				   pgaio_io_state_get_name(new_state));
+
+	/*
+	 * Ensure the changes signified by the new state are visible before the
+	 * new state becomes visible.
+	 */
+	pg_write_barrier();
+
+	ioh->state = new_state;
+}
+
+static void
+pgaio_io_resowner_register(PgAioHandle *ioh)
+{
+	Assert(!ioh->resowner);
+	Assert(CurrentResourceOwner);
+
+	ResourceOwnerRememberAioHandle(CurrentResourceOwner, &ioh->resowner_node);
+	ioh->resowner = CurrentResourceOwner;
+}
+
+/*
+ * Should only be called from pgaio_io_prep_*().
+ */
+void
+pgaio_io_stage(PgAioHandle *ioh, PgAioOp op)
+{
+	bool		needs_synchronous;
+
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+	Assert(pgaio_io_has_target(ioh));
+
+	ioh->op = op;
+	ioh->result = 0;
+
+	pgaio_io_update_state(ioh, PGAIO_HS_DEFINED);
+
+	/* allow a new IO to be staged */
+	pgaio_my_backend->handed_out_io = NULL;
+
+	pgaio_io_call_stage(ioh);
+
+	pgaio_io_update_state(ioh, PGAIO_HS_STAGED);
+
+	needs_synchronous = pgaio_io_needs_synchronous_execution(ioh);
+
+	pgaio_debug_io(DEBUG3, ioh,
+				   "prepared, executing synchronously: %d",
+				   needs_synchronous);
+
+	if (!needs_synchronous)
+	{
+		pgaio_my_backend->staged_ios[pgaio_my_backend->num_staged_ios++] = ioh;
+		Assert(pgaio_my_backend->num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);
+	}
+	else
+	{
+		pgaio_io_prepare_submit(ioh);
+		pgaio_io_perform_synchronously(ioh);
+	}
+}
+
+bool
+pgaio_io_needs_synchronous_execution(PgAioHandle *ioh)
+{
+	if (ioh->flags & PGAIO_HF_SYNCHRONOUS)
+	{
+		/* XXX: should we also check if there are other IOs staged? */
+		return true;
+	}
+
+	if (pgaio_method_ops->needs_synchronous_execution)
+		return pgaio_method_ops->needs_synchronous_execution(ioh);
+	return false;
+}
+
+/*
+ * Handle IO being processed by IO method.
+ *
+ * Should be called by IO methods / synchronous IO execution, just before the
+ * IO is performed.
+ */
+void
+pgaio_io_prepare_submit(PgAioHandle *ioh)
+{
+	pgaio_io_update_state(ioh, PGAIO_HS_SUBMITTED);
+
+	dclist_push_tail(&pgaio_my_backend->in_flight_ios, &ioh->node);
+}
+
+/*
+ * Handle IO getting completed by a method.
+ *
+ * Should be called by IO methods / synchronous IO execution
+ */
+void
+pgaio_io_process_completion(PgAioHandle *ioh, int result)
+{
+	Assert(ioh->state == PGAIO_HS_SUBMITTED);
+
+	ioh->result = result;
+
+	pgaio_io_update_state(ioh, PGAIO_HS_COMPLETED_IO);
+
+	pgaio_io_call_complete_shared(ioh);
+
+	pgaio_io_update_state(ioh, PGAIO_HS_COMPLETED_SHARED);
+
+	/* condition variable broadcast ensures state is visible before wakeup */
+	ConditionVariableBroadcast(&ioh->cv);
+
+	if (ioh->owner_procno == MyProcNumber)
+		pgaio_io_reclaim(ioh);
+}
+
+bool
+pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state)
+{
+	*state = ioh->state;
+	pg_read_barrier();
+
+	return ioh->generation != ref_generation;
+}
+
+/*
+ * Wait for IO to complete. External code should never use this, outside of
+ * the AIO subsystem waits are only allowed via pgaio_wref_wait().
+ */
+static void
+pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
+{
+	PgAioHandleState state;
+	bool		am_owner;
+
+	am_owner = ioh->owner_procno == MyProcNumber;
+
+	if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+		return;
+
+	if (am_owner)
+	{
+		if (state == PGAIO_HS_STAGED)
+		{
+			/* XXX: Arguably this should be prevented by callers? */
+			pgaio_submit_staged();
+		}
+		else if (state != PGAIO_HS_SUBMITTED
+				 && state != PGAIO_HS_COMPLETED_IO
+				 && state != PGAIO_HS_COMPLETED_SHARED
+				 && state != PGAIO_HS_COMPLETED_LOCAL)
+		{
+			elog(PANIC, "waiting for own IO in wrong state: %d",
+				 state);
+		}
+	}
+
+	while (true)
+	{
+		if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+			return;
+
+		switch (state)
+		{
+			case PGAIO_HS_IDLE:
+			case PGAIO_HS_HANDED_OUT:
+				elog(ERROR, "IO in wrong state: %d", state);
+				break;
+
+			case PGAIO_HS_SUBMITTED:
+
+				/*
+				 * If we need to wait via the IO method, do so now. Don't
+				 * check via the IO method if the issuing backend is executing
+				 * the IO synchronously.
+				 */
+				if (pgaio_method_ops->wait_one && !(ioh->flags & PGAIO_HF_SYNCHRONOUS))
+				{
+					pgaio_method_ops->wait_one(ioh, ref_generation);
+					continue;
+				}
+				/* fallthrough */
+
+				/* waiting for owner to submit */
+			case PGAIO_HS_DEFINED:
+			case PGAIO_HS_STAGED:
+				/* waiting for reaper to complete */
+				/* fallthrough */
+			case PGAIO_HS_COMPLETED_IO:
+				/* shouldn't be able to hit this otherwise */
+				Assert(IsUnderPostmaster);
+				/* ensure we're going to get woken up */
+				ConditionVariablePrepareToSleep(&ioh->cv);
+
+				while (!pgaio_io_was_recycled(ioh, ref_generation, &state))
+				{
+					if (state == PGAIO_HS_COMPLETED_SHARED ||
+						state == PGAIO_HS_COMPLETED_LOCAL)
+						break;
+					ConditionVariableSleep(&ioh->cv, WAIT_EVENT_AIO_COMPLETION);
+				}
+
+				ConditionVariableCancelSleep();
+				break;
+
+			case PGAIO_HS_COMPLETED_SHARED:
+			case PGAIO_HS_COMPLETED_LOCAL:
+				/* see above */
+				if (am_owner)
+					pgaio_io_reclaim(ioh);
+				return;
+		}
+	}
+}
+
+static void
+pgaio_io_reclaim(PgAioHandle *ioh)
+{
+	/* This is only ok if it's our IO */
+	Assert(ioh->owner_procno == MyProcNumber);
+
+	pgaio_debug_io(DEBUG4, ioh,
+				   "reclaiming, result: %d, distilled_result: AFIXME, report to: %p",
+				   ioh->result,
+				   ioh->report_return);
+
+	if (ioh->state == PGAIO_HS_COMPLETED_SHARED)
+	{
+		pgaio_io_call_complete_local(ioh);
+		pgaio_io_update_state(ioh, PGAIO_HS_COMPLETED_LOCAL);
+	}
+
+	/* if the IO has been defined, we might need to do more work */
+	if (ioh->state != PGAIO_HS_HANDED_OUT)
+	{
+		dclist_delete_from(&pgaio_my_backend->in_flight_ios, &ioh->node);
+
+		if (ioh->report_return)
+		{
+			ioh->report_return->result = ioh->distilled_result;
+			ioh->report_return->target_data = ioh->target_data;
+		}
+	}
+
+	if (ioh->resowner)
+	{
+		ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
+		ioh->resowner = NULL;
+	}
+
+	Assert(!ioh->resowner);
+
+	ioh->op = PGAIO_OP_INVALID;
+	ioh->target = PGAIO_TID_INVALID;
+	ioh->flags = 0;
+	ioh->num_shared_callbacks = 0;
+	ioh->handle_data_len = 0;
+	ioh->report_return = NULL;
+	ioh->result = 0;
+	ioh->distilled_result.status = ARS_UNKNOWN;
+
+	/* XXX: the barrier is probably superfluous */
+	pg_write_barrier();
+	ioh->generation++;
+
+	pgaio_io_update_state(ioh, PGAIO_HS_IDLE);
+
+	/*
+	 * We push the IO to the head of the idle IO list, that seems more cache
+	 * efficient in cases where only a few IOs are used.
+	 */
+	dclist_push_head(&pgaio_my_backend->idle_ios, &ioh->node);
+}
+
+static void
+pgaio_io_wait_for_free(void)
+{
+	int			reclaimed = 0;
+
+	pgaio_debug(DEBUG2, "waiting for self with %d pending",
+				pgaio_my_backend->num_staged_ios);
+
+	/*
+	 * First check if any of our IOs actually have completed - when using
+	 * worker, that'll often be the case. We could do so as part of the loop
+	 * below, but that'd potentially lead us to wait for some IO submitted
+	 * before.
+	 */
+	for (int i = 0; i < io_max_concurrency; i++)
+	{
+		PgAioHandle *ioh = &pgaio_ctl->io_handles[pgaio_my_backend->io_handle_off + i];
+
+		if (ioh->state == PGAIO_HS_COMPLETED_SHARED)
+		{
+			pgaio_io_reclaim(ioh);
+			reclaimed++;
+		}
+	}
+
+	if (reclaimed > 0)
+		return;
+
+	/*
+	 * If we have any unsubmitted IOs, submit them now. We'll start waiting in
+	 * a second, so it's better they're in flight. This also addresses the
+	 * edge-case that all IOs are unsubmitted.
+	 */
+	if (pgaio_my_backend->num_staged_ios > 0)
+	{
+		pgaio_submit_staged();
+	}
+
+	/*
+	 * It's possible that we recognized there were free IOs while submitting.
+	 */
+	if (dclist_count(&pgaio_my_backend->in_flight_ios) == 0)
+	{
+		elog(ERROR, "no free IOs despite no in-flight IOs");
+	}
+
+	/*
+	 * Wait for the oldest in-flight IO to complete.
+	 *
+	 * XXX: Reusing the general IO wait is suboptimal, we don't need to wait
+	 * for that specific IO to complete, we just need *any* IO to complete.
+	 */
+	{
+		PgAioHandle *ioh = dclist_head_element(PgAioHandle, node, &pgaio_my_backend->in_flight_ios);
+
+		switch (ioh->state)
+		{
+				/* should not be in in-flight list */
+			case PGAIO_HS_IDLE:
+			case PGAIO_HS_DEFINED:
+			case PGAIO_HS_HANDED_OUT:
+			case PGAIO_HS_STAGED:
+			case PGAIO_HS_COMPLETED_LOCAL:
+				elog(ERROR, "shouldn't get here with io:%d in state %d",
+					 pgaio_io_get_id(ioh), ioh->state);
+				break;
+
+			case PGAIO_HS_COMPLETED_IO:
+			case PGAIO_HS_SUBMITTED:
+				pgaio_debug_io(DEBUG2, ioh,
+							   "waiting for free io with %d in flight",
+							   dclist_count(&pgaio_my_backend->in_flight_ios));
+
+				/*
+				 * In a more general case this would be racy, because the
+				 * generation could increase after we read ioh->state above.
+				 * But we are only looking at IOs by the current backend and
+				 * the IO can only be recycled by this backend.
+				 */
+				pgaio_io_wait(ioh, ioh->generation);
+				break;
+
+			case PGAIO_HS_COMPLETED_SHARED:
+				/* it's possible that another backend just finished this IO */
+				pgaio_io_reclaim(ioh);
+				break;
+		}
+
+		if (dclist_count(&pgaio_my_backend->idle_ios) == 0)
+			elog(PANIC, "no idle IOs after waiting");
+		return;
+	}
+}
+
+/*
+ * Internal - code outside of AIO should never need this and it'd be hard for
+ * such code to be safe.
+ */
+static PgAioHandle *
+pgaio_io_from_wref(PgAioWaitRef *iow, uint64 *ref_generation)
+{
+	PgAioHandle *ioh;
+
+	Assert(iow->aio_index < pgaio_ctl->io_handle_count);
+
+	ioh = &pgaio_ctl->io_handles[iow->aio_index];
+
+	*ref_generation = ((uint64) iow->generation_upper) << 32 |
+		iow->generation_lower;
+
+	Assert(*ref_generation != 0);
+
+	return ioh;
+}
+
+static const char *
+pgaio_io_state_get_name(PgAioHandleState s)
+{
+#define PGAIO_HS_TOSTR_CASE(sym) case PGAIO_HS_##sym: return #sym
+	switch (s)
+	{
+			PGAIO_HS_TOSTR_CASE(IDLE);
+			PGAIO_HS_TOSTR_CASE(HANDED_OUT);
+			PGAIO_HS_TOSTR_CASE(DEFINED);
+			PGAIO_HS_TOSTR_CASE(STAGED);
+			PGAIO_HS_TOSTR_CASE(SUBMITTED);
+			PGAIO_HS_TOSTR_CASE(COMPLETED_IO);
+			PGAIO_HS_TOSTR_CASE(COMPLETED_SHARED);
+			PGAIO_HS_TOSTR_CASE(COMPLETED_LOCAL);
+	}
+#undef PGAIO_HS_TOSTR_CASE
+
+	return NULL;				/* silence compiler */
+}
+
+const char *
+pgaio_io_get_state_name(PgAioHandle *ioh)
+{
+	return pgaio_io_state_get_name(ioh->state);
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Functions primarily related to IO Wait References
+ * --------------------------------------------------------------------------------
+ */
+
+void
+pgaio_wref_clear(PgAioWaitRef *iow)
+{
+	iow->aio_index = PG_UINT32_MAX;
+}
+
+bool
+pgaio_wref_valid(PgAioWaitRef *iow)
+{
+	return iow->aio_index != PG_UINT32_MAX;
+}
+
+int
+pgaio_wref_get_id(PgAioWaitRef *iow)
+{
+	Assert(pgaio_wref_valid(iow));
+	return iow->aio_index;
+}
+
+/*
+ * Wait for the IO to have completed.
+ */
+void
+pgaio_wref_wait(PgAioWaitRef *iow)
+{
+	uint64		ref_generation;
+	PgAioHandle *ioh;
+
+	ioh = pgaio_io_from_wref(iow, &ref_generation);
+
+	pgaio_io_wait(ioh, ref_generation);
+}
+
+/*
+ * Check if the referenced IO completed, without blocking.
+ */
+bool
+pgaio_wref_check_done(PgAioWaitRef *iow)
+{
+	uint64		ref_generation;
+	PgAioHandleState state;
+	bool		am_owner;
+	PgAioHandle *ioh;
+
+	ioh = pgaio_io_from_wref(iow, &ref_generation);
+
+	if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+		return true;
+
+	if (state == PGAIO_HS_IDLE)
+		return true;
+
+	am_owner = ioh->owner_procno == MyProcNumber;
+
+	if (state == PGAIO_HS_COMPLETED_SHARED ||
+		state == PGAIO_HS_COMPLETED_LOCAL)
+	{
+		if (am_owner)
+			pgaio_io_reclaim(ioh);
+		return true;
+	}
+
+	return false;
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Actions on multiple IOs.
+ * --------------------------------------------------------------------------------
+ */
+
+void
+pgaio_submit_staged(void)
+{
+	int			num_staged_ios = pgaio_my_backend->num_staged_ios;
+	int			did_submit;
+
+	if (num_staged_ios == 0)
+		return;
+
+	START_CRIT_SECTION();
+
+	did_submit = pgaio_method_ops->submit(num_staged_ios,
+										  pgaio_my_backend->staged_ios);
+
+	END_CRIT_SECTION();
+
+	/*
+	 * The IO method is expected to have submitted all staged IOs.
+	 */
+	Assert(did_submit == num_staged_ios);
+
+	pgaio_my_backend->num_staged_ios = 0;
+
+	pgaio_debug(DEBUG4,
+				"aio: submitted %d IOs",
+				did_submit);
+}
+
+bool
+pgaio_have_staged(void)
+{
+	return pgaio_my_backend->num_staged_ios > 0;
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Other
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Need to submit staged but not yet submitted IOs using the fd, otherwise
+ * the IO would end up targeting something bogus.
+ */
+void
+pgaio_closing_fd(int fd)
+{
+	/*
+	 * Might be called before AIO is initialized or in a subprocess that
+	 * doesn't use AIO.
+	 */
+	if (!pgaio_my_backend)
+		return;
+
+	/*
+	 * For now just submit all staged IOs - we could be more selective, but
+	 * it's probably not worth it.
+	 */
+	pgaio_submit_staged();
+}
+
+void
+pgaio_at_xact_end(bool is_subxact, bool is_commit)
+{
+	Assert(!pgaio_my_backend->handed_out_io);
+}
+
+/*
+ * Similar to pgaio_at_xact_end(..., is_commit = false), but for cases where
+ * errors happen outside of transactions.
+ */
+void
+pgaio_at_error(void)
+{
+	Assert(!pgaio_my_backend->handed_out_io);
+}
+
+void
+pgaio_shutdown(int code, Datum arg)
+{
+	Assert(pgaio_my_backend);
+	Assert(!pgaio_my_backend->handed_out_io);
+
+	/*
+	 * Before exiting, make sure that all IOs are finished. That has two main
+	 * purposes: - it's somewhat annoying to see partially finished IOs in
+	 * stats views etc - it's rumored that some kernel-level AIO mechanisms
+	 * don't deal well with the issuer of an AIO exiting
+	 */
+
+	while (!dclist_is_empty(&pgaio_my_backend->in_flight_ios))
+	{
+		PgAioHandle *ioh = dclist_head_element(PgAioHandle, node, &pgaio_my_backend->in_flight_ios);
+
+		/* see comment in pgaio_io_wait_for_free() about raciness */
+		pgaio_io_wait(ioh, ioh->generation);
+	}
+
+	pgaio_my_backend = NULL;
+}
 
 void
 assign_io_method(int newval, void *extra)
 {
+	/* validate the index before using it; bound against the ops table itself */
+	Assert(newval >= 0 && newval < lengthof(pgaio_method_ops_table));
+	Assert(pgaio_method_ops_table[newval] != NULL);
+
+	pgaio_method_ops = pgaio_method_ops_table[newval];
 }
diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c
new file mode 100644
index 00000000000..93f71690169
--- /dev/null
+++ b/src/backend/storage/aio/aio_callback.c
@@ -0,0 +1,280 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_callback.c
+ *	  AIO - Functionality related to callbacks that can be registered on IO
+ *	  Handles
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/aio/aio_callback.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+#include "utils/memutils.h"
+
+
+/* just to have something to put into the aio_handle_cbs */
+static const struct PgAioHandleCallbacks aio_invalid_cb = {0};
+
+typedef struct PgAioHandleCallbacksEntry
+{
+	const PgAioHandleCallbacks *const cb;
+	const char *const name;
+} PgAioHandleCallbacksEntry;
+
+/*
+ * Callback definition for the callbacks that can be registered on an IO
+ * handle.  See PgAioHandleCallbackID's definition for an explanation for why
+ * callbacks are not identified by a pointer.
+ */
+static const PgAioHandleCallbacksEntry aio_handle_cbs[] = {
+#define CALLBACK_ENTRY(id, callback)  [id] = {.cb = &callback, .name = #callback}
+	CALLBACK_ENTRY(PGAIO_HCB_INVALID, aio_invalid_cb),
+#undef CALLBACK_ENTRY
+};
+
+
+
+/*
+ * Register callback for the IO handle.
+ *
+ * Only a limited number (PGAIO_HANDLE_MAX_CALLBACKS) of callbacks can be
+ * registered for each IO.
+ *
+ * Callbacks need to be registered before [indirectly] calling
+ * pgaio_io_prep_*(), as the IO may be executed immediately.
+ *
+ *
+ * Note that callbacks are executed in critical sections.  This is necessary
+ * to be able to execute IO in critical sections (consider e.g. WAL
+ * logging). To perform AIO we first need to acquire a handle, which, if there
+ * are no free handles, requires waiting for IOs to complete and to execute
+ * their completion callbacks.
+ *
+ * Callbacks may be executed in the issuing backend but also in another
+ * backend (because that backend is waiting for the IO) or in IO workers (if
+ * io_method=worker is used).
+ *
+ *
+ * See PgAioHandleCallbackID's definition for an explanation for why
+ * callbacks are not identified by a pointer.
+ */
+void
+pgaio_io_register_callbacks(PgAioHandle *ioh, PgAioHandleCallbackID cbid)
+{
+	const PgAioHandleCallbacksEntry *ce;
+
+	/* validate cbid before using it to index aio_handle_cbs */
+	if (cbid >= lengthof(aio_handle_cbs))
+		elog(ERROR, "callback %d is out of range", cbid);
+	ce = &aio_handle_cbs[cbid];
+	if (ce->cb->complete_shared == NULL && ce->cb->complete_local == NULL)
+		elog(ERROR, "callback %d does not have completion callback", cbid);
+	if (ioh->num_shared_callbacks >= PGAIO_HANDLE_MAX_CALLBACKS)
+		elog(PANIC, "too many callbacks, the max is %d", PGAIO_HANDLE_MAX_CALLBACKS);
+	ioh->shared_callbacks[ioh->num_shared_callbacks] = cbid;
+
+	pgaio_debug_io(DEBUG3, ioh,
+				   "adding cb #%d, id %d/%s",
+				   ioh->num_shared_callbacks + 1,
+				   cbid, ce->name);
+
+	ioh->num_shared_callbacks++;
+}
+
+/*
+ * Associate an array of data with the Handle. This is e.g. useful to
+ * transport knowledge about which buffers a multi-block IO affects to the
+ * completion callbacks.
+ *
+ * Right now this can be done only once for each IO, even though multiple
+ * callbacks can be registered. There aren't any known usecases requiring more
+ * and the required amount of shared memory does add up, so it doesn't seem
+ * worth multiplying memory usage by PGAIO_HANDLE_MAX_CALLBACKS.
+ */
+void
+pgaio_io_set_handle_data_64(PgAioHandle *ioh, uint64 *data, uint8 len)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+	Assert(ioh->handle_data_len == 0);
+	Assert(len <= PG_IOV_MAX);
+
+	for (int i = 0; i < len; i++)
+		pgaio_ctl->handle_data[ioh->iovec_off + i] = data[i];
+	ioh->handle_data_len = len;
+}
+
+/*
+ * Convenience version of pgaio_io_set_handle_data_64() that converts a 32bit
+ * array to a 64bit array. Without it callers would end up needing to
+ * open-code equivalent code.
+ */
+void
+pgaio_io_set_handle_data_32(PgAioHandle *ioh, uint32 *data, uint8 len)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+	Assert(ioh->handle_data_len == 0);
+	Assert(len <= PG_IOV_MAX);
+
+	for (int i = 0; i < len; i++)
+		pgaio_ctl->handle_data[ioh->iovec_off + i] = data[i];
+	ioh->handle_data_len = len;
+}
+
+/*
+ * Return data set with pgaio_io_set_handle_data_*().
+ */
+uint64 *
+pgaio_io_get_handle_data(PgAioHandle *ioh, uint8 *len)
+{
+	Assert(ioh->handle_data_len > 0);
+
+	*len = ioh->handle_data_len;
+
+	return &pgaio_ctl->handle_data[ioh->iovec_off];
+}
+
+/*
+ * Internal function which invokes ->stage for all the registered callbacks.
+ */
+void
+pgaio_io_call_stage(PgAioHandle *ioh)
+{
+	Assert(ioh->target > PGAIO_TID_INVALID && ioh->target < PGAIO_TID_COUNT);
+	Assert(ioh->op > PGAIO_OP_INVALID && ioh->op < PGAIO_OP_COUNT);
+
+	for (int i = ioh->num_shared_callbacks; i > 0; i--)
+	{
+		PgAioHandleCallbackID cbid = ioh->shared_callbacks[i - 1];
+		const PgAioHandleCallbacksEntry *ce = &aio_handle_cbs[cbid];
+
+		if (!ce->cb->stage)
+			continue;
+
+		pgaio_debug_io(DEBUG3, ioh,
+					   "calling cb #%d %d/%s->stage",
+					   i, cbid, ce->name);
+		ce->cb->stage(ioh);
+	}
+}
+
+/*
+ * Internal function which invokes ->complete_shared for all the registered
+ * callbacks.
+ */
+void
+pgaio_io_call_complete_shared(PgAioHandle *ioh)
+{
+	PgAioResult result;
+
+	START_CRIT_SECTION();
+
+	Assert(ioh->target > PGAIO_TID_INVALID && ioh->target < PGAIO_TID_COUNT);
+	Assert(ioh->op > PGAIO_OP_INVALID && ioh->op < PGAIO_OP_COUNT);
+
+	result.status = ARS_OK;		/* low level IO is always considered OK */
+	result.result = ioh->result;
+	result.id = PGAIO_HCB_INVALID;
+	result.error_data = 0;
+
+	for (int i = ioh->num_shared_callbacks; i > 0; i--)
+	{
+		PgAioHandleCallbackID cbid = ioh->shared_callbacks[i - 1];
+		const PgAioHandleCallbacksEntry *ce = &aio_handle_cbs[cbid];
+
+		if (!ce->cb->complete_shared)
+			continue;
+
+		pgaio_debug_io(DEBUG3, ioh,
+					   "calling cb #%d, id %d/%s->complete_shared with distilled result status %d, id %u, error_data: %d, result: %d",
+					   i, cbid, ce->name,
+					   result.status, result.id, result.error_data, result.result);
+		result = ce->cb->complete_shared(ioh, result);
+	}
+
+	ioh->distilled_result = result;
+
+	pgaio_debug_io(DEBUG3, ioh,
+				   "distilled result status %d, id %u, error_data: %d, result: %d, raw_result %d",
+				   result.status, result.id, result.error_data, result.result,
+				   ioh->result);
+
+	END_CRIT_SECTION();
+}
+
+
+/*
+ * Internal function which invokes ->complete_local for all the registered
+ * callbacks.
+ *
+ * XXX: It'd be nice to deduplicate with pgaio_io_call_complete_shared().
+ */
+void
+pgaio_io_call_complete_local(PgAioHandle *ioh)
+{
+	PgAioResult result;
+
+	START_CRIT_SECTION();
+
+	Assert(ioh->target > PGAIO_TID_INVALID && ioh->target < PGAIO_TID_COUNT);
+	Assert(ioh->op > PGAIO_OP_INVALID && ioh->op < PGAIO_OP_COUNT);
+
+	/* start with distilled result from shared callback */
+	result = ioh->distilled_result;
+
+	for (int i = ioh->num_shared_callbacks; i > 0; i--)
+	{
+		PgAioHandleCallbackID cbid = ioh->shared_callbacks[i - 1];
+		const PgAioHandleCallbacksEntry *ce = &aio_handle_cbs[cbid];
+
+		if (!ce->cb->complete_local)
+			continue;
+
+		pgaio_debug_io(DEBUG3, ioh,
+					   "calling cb #%d, id %d/%s->complete_local with distilled result status %d, id %u, error_data: %d, result: %d",
+					   i, cbid, ce->name,
+					   result.status, result.id, result.error_data, result.result);
+		result = ce->cb->complete_local(ioh, result);
+	}
+
+	/*
+	 * Note that we don't save the result in ioh->distilled_result, the local
+	 * callback's result should not ever matter to other waiters.
+	 */
+	pgaio_debug_io(DEBUG3, ioh,
+				   "distilled result status %d, id %u, error_data: %d, result: %d, raw_result %d",
+				   result.status, result.id, result.error_data, result.result,
+				   ioh->result);
+
+	END_CRIT_SECTION();
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO Result
+ * --------------------------------------------------------------------------------
+ */
+
+void
+pgaio_result_report(PgAioResult result, const PgAioTargetData *target_data, int elevel)
+{
+	PgAioHandleCallbackID cbid = result.id;
+	const PgAioHandleCallbacksEntry *ce = &aio_handle_cbs[cbid];
+
+	Assert(result.status != ARS_UNKNOWN);
+	Assert(result.status != ARS_OK);
+
+	if (ce->cb->report == NULL)
+		elog(ERROR, "callback %d/%s does not have report callback",
+			 result.id, ce->name);
+
+	ce->cb->report(result, target_data, elevel);
+}
diff --git a/src/backend/storage/aio/aio_init.c b/src/backend/storage/aio/aio_init.c
index f7ee8270756..0e98cc0c8fb 100644
--- a/src/backend/storage/aio/aio_init.c
+++ b/src/backend/storage/aio/aio_init.c
@@ -14,24 +14,210 @@
 
 #include "postgres.h"
 
+#include "miscadmin.h"
+#include "storage/aio.h"
 #include "storage/aio_init.h"
+#include "storage/aio_internal.h"
+#include "storage/ipc.h"
+#include "storage/proc.h"
+#include "storage/shmem.h"
+#include "utils/guc.h"
 
 
 
+static Size
+AioCtlShmemSize(void)
+{
+	Size		sz;
+
+	/* pgaio_ctl itself */
+	sz = offsetof(PgAioCtl, io_handles);
+
+	return sz;
+}
+
+static uint32
+AioProcs(void)
+{
+	return MaxBackends + NUM_AUXILIARY_PROCS;
+}
+
+static Size
+AioBackendShmemSize(void)
+{
+	return mul_size(AioProcs(), sizeof(PgAioBackend));
+}
+
+static Size
+AioHandleShmemSize(void)
+{
+	Size		sz;
+
+	/* ios */
+	sz = mul_size(AioProcs(),
+				  mul_size(io_max_concurrency, sizeof(PgAioHandle)));
+
+	return sz;
+}
+
+static Size
+AioHandleIOVShmemSize(void)
+{
+	return mul_size(sizeof(struct iovec),
+					mul_size(mul_size(PG_IOV_MAX, AioProcs()),
+							 io_max_concurrency));
+}
+
+static Size
+AioHandleDataShmemSize(void)
+{
+	return mul_size(sizeof(uint64),
+					mul_size(mul_size(PG_IOV_MAX, AioProcs()),
+							 io_max_concurrency));
+}
+
+/*
+ * Choose a suitable value for io_max_concurrency.
+ *
+ * It's unlikely that we could have more IOs in flight than buffers that we
+ * would be allowed to pin.
+ *
+ * On the upper end, apply a cap too - just because shared_buffers is large,
+ * it doesn't make sense to have millions of buffers undergo IO concurrently.
+ */
+static int
+AioChooseMaxConccurrency(void)
+{
+	uint32		max_backends;
+	int			max_proportional_pins;
+
+	/* Similar logic to LimitAdditionalPins() */
+	max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
+	max_proportional_pins = NBuffers / max_backends;
+
+	max_proportional_pins = Max(max_proportional_pins, 1);
+
+	/* apply upper limit */
+	return Min(max_proportional_pins, 64);
+}
+
 Size
 AioShmemSize(void)
 {
 	Size		sz = 0;
 
+	/*
+	 * We prefer to report this value's source as PGC_S_DYNAMIC_DEFAULT.
+	 * However, if the DBA explicitly set io_max_concurrency = -1 in the
+	 * config file, then PGC_S_DYNAMIC_DEFAULT will fail to override that and
+	 * we must force the matter with PGC_S_OVERRIDE.
+	 */
+	if (io_max_concurrency == -1)
+	{
+		char		buf[32];
+
+		snprintf(buf, sizeof(buf), "%d", AioChooseMaxConccurrency());
+		SetConfigOption("io_max_concurrency", buf, PGC_POSTMASTER,
+						PGC_S_DYNAMIC_DEFAULT);
+		if (io_max_concurrency == -1)	/* failed to apply it? */
+			SetConfigOption("io_max_concurrency", buf, PGC_POSTMASTER,
+							PGC_S_OVERRIDE);
+	}
+
+	sz = add_size(sz, AioCtlShmemSize());
+	sz = add_size(sz, AioBackendShmemSize());
+	sz = add_size(sz, AioHandleShmemSize());
+	sz = add_size(sz, AioHandleIOVShmemSize());
+	sz = add_size(sz, AioHandleDataShmemSize());
+
+	if (pgaio_method_ops->shmem_size)
+		sz = add_size(sz, pgaio_method_ops->shmem_size());
+
 	return sz;
 }
 
 void
 AioShmemInit(void)
 {
+	bool		found;
+	uint32		io_handle_off = 0;
+	uint32		iovec_off = 0;
+	uint32		per_backend_iovecs = io_max_concurrency * PG_IOV_MAX;
+
+	pgaio_ctl = (PgAioCtl *)
+		ShmemInitStruct("AioCtl", AioCtlShmemSize(), &found);
+
+	if (found)
+		goto out;
+
+	memset(pgaio_ctl, 0, AioCtlShmemSize());
+
+	pgaio_ctl->io_handle_count = AioProcs() * io_max_concurrency;
+	pgaio_ctl->iovec_count = AioProcs() * per_backend_iovecs;
+
+	pgaio_ctl->backend_state = (PgAioBackend *)
+		ShmemInitStruct("AioBackend", AioBackendShmemSize(), &found);
+
+	pgaio_ctl->io_handles = (PgAioHandle *)
+		ShmemInitStruct("AioHandle", AioHandleShmemSize(), &found);
+
+	pgaio_ctl->iovecs = (struct iovec *)
+		ShmemInitStruct("AioHandleIOV", AioHandleIOVShmemSize(), &found);
+	pgaio_ctl->handle_data = (uint64 *)
+		ShmemInitStruct("AioHandleData", AioHandleDataShmemSize(), &found);
+
+	for (int procno = 0; procno < AioProcs(); procno++)
+	{
+		PgAioBackend *bs = &pgaio_ctl->backend_state[procno];
+
+		bs->io_handle_off = io_handle_off;
+		io_handle_off += io_max_concurrency;
+
+		dclist_init(&bs->idle_ios);
+		memset(bs->staged_ios, 0, sizeof(PgAioHandle *) * PGAIO_SUBMIT_BATCH_SIZE);
+		dclist_init(&bs->in_flight_ios);
+
+		/* initialize per-backend IOs */
+		for (int i = 0; i < io_max_concurrency; i++)
+		{
+			PgAioHandle *ioh = &pgaio_ctl->io_handles[bs->io_handle_off + i];
+
+			ioh->generation = 1;
+			ioh->owner_procno = procno;
+			ioh->iovec_off = iovec_off;
+			ioh->handle_data_len = 0;
+			ioh->report_return = NULL;
+			ioh->resowner = NULL;
+			ioh->num_shared_callbacks = 0;
+			ioh->distilled_result.status = ARS_UNKNOWN;
+			ioh->flags = 0;
+
+			ConditionVariableInit(&ioh->cv);
+
+			dclist_push_tail(&bs->idle_ios, &ioh->node);
+			iovec_off += PG_IOV_MAX;
+		}
+	}
+
+out:
+	/* Initialize IO method specific resources. */
+	if (pgaio_method_ops->shmem_init)
+		pgaio_method_ops->shmem_init(!found);
 }
 
 void
 pgaio_init_backend(void)
 {
+	/* shouldn't be initialized twice */
+	Assert(!pgaio_my_backend);
+
+	if (MyProc == NULL || MyProcNumber >= AioProcs())
+		elog(ERROR, "aio requires a normal PGPROC");
+
+	pgaio_my_backend = &pgaio_ctl->backend_state[MyProcNumber];
+
+	if (pgaio_method_ops->init_backend)
+		pgaio_method_ops->init_backend();
+
+	before_shmem_exit(pgaio_shutdown, 0);
 }
diff --git a/src/backend/storage/aio/aio_io.c b/src/backend/storage/aio/aio_io.c
new file mode 100644
index 00000000000..bb010d6152c
--- /dev/null
+++ b/src/backend/storage/aio/aio_io.c
@@ -0,0 +1,175 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_io.c
+ *    AIO - Low Level IO Handling
+ *
+ * Functions related to associating IO operations to IO Handles and IO-method
+ * independent support functions for actually performing IO.
+ *
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/aio/aio_io.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+#include "storage/fd.h"
+#include "utils/wait_event.h"
+
+
+static void pgaio_io_before_prep(PgAioHandle *ioh);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Public IO related functions operating on IO Handles
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Scatter/gather IO needs to associate an iovec with the Handle. To support
+ * worker mode this data needs to be in shared memory.
+ *
+ * XXX: Right now the amount of space available for each IO is
+ * PG_IOV_MAX. While it's tempting to use the io_combine_limit GUC, that's
+ * PGC_USERSET, so we can't allocate shared memory based on that.
+ */
+int
+pgaio_io_get_iovec(PgAioHandle *ioh, struct iovec **iov)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+
+	*iov = &pgaio_ctl->iovecs[ioh->iovec_off];
+
+	return PG_IOV_MAX;
+}
+
+PgAioOpData *
+pgaio_io_get_op_data(PgAioHandle *ioh)
+{
+	return &ioh->op_data;
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * "Preparation" routines for individual IO operations
+ *
+ * These are called by the code actually initiating an IO, to associate the IO
+ * specific data with an AIO handle.
+ *
+ * Each of the preparation routines first needs to call
+ * pgaio_io_before_prep(), then fill IO specific fields in the handle and then
+ * finally call pgaio_io_stage().
+ * --------------------------------------------------------------------------------
+ */
+
+void
+pgaio_io_prep_readv(PgAioHandle *ioh,
+					int fd, int iovcnt, uint64 offset)
+{
+	pgaio_io_before_prep(ioh);
+
+	ioh->op_data.read.fd = fd;
+	ioh->op_data.read.offset = offset;
+	ioh->op_data.read.iov_length = iovcnt;
+
+	pgaio_io_stage(ioh, PGAIO_OP_READV);
+}
+
+void
+pgaio_io_prep_writev(PgAioHandle *ioh,
+					 int fd, int iovcnt, uint64 offset)
+{
+	pgaio_io_before_prep(ioh);
+
+	ioh->op_data.write.fd = fd;
+	ioh->op_data.write.offset = offset;
+	ioh->op_data.write.iov_length = iovcnt;
+
+	pgaio_io_stage(ioh, PGAIO_OP_WRITEV);
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Internal IO related functions operating on IO Handles
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Execute IO operation synchronously. This is implemented here, not in
+ * method_sync.c, because other IO methods also might use it / fall back to it.
+ */
+void
+pgaio_io_perform_synchronously(PgAioHandle *ioh)
+{
+	ssize_t		result = 0;
+	struct iovec *iov = &pgaio_ctl->iovecs[ioh->iovec_off];
+
+	/* Perform IO. */
+	switch (ioh->op)
+	{
+		case PGAIO_OP_READV:
+			pgstat_report_wait_start(WAIT_EVENT_DATA_FILE_READ);
+			result = pg_preadv(ioh->op_data.read.fd, iov,
+							   ioh->op_data.read.iov_length,
+							   ioh->op_data.read.offset);
+			pgstat_report_wait_end();
+			break;
+		case PGAIO_OP_WRITEV:
+			pgstat_report_wait_start(WAIT_EVENT_DATA_FILE_WRITE);
+			result = pg_pwritev(ioh->op_data.write.fd, iov,
+								ioh->op_data.write.iov_length,
+								ioh->op_data.write.offset);
+			pgstat_report_wait_end();
+			break;
+		case PGAIO_OP_INVALID:
+			elog(ERROR, "trying to execute invalid IO operation");
+	}
+
+	ioh->result = result < 0 ? -errno : result;
+
+	pgaio_io_process_completion(ioh, ioh->result);
+}
+
+/*
+ * Helper function to be called by IO operation preparation functions, before
+ * any data in the handle is set.  Mostly to centralize assertions.
+ */
+static void
+pgaio_io_before_prep(PgAioHandle *ioh)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+	Assert(pgaio_io_has_target(ioh));
+	Assert(ioh->op == PGAIO_OP_INVALID);
+}
+
+/*
+ * Could be made part of the public interface, but it's not clear there's
+ * really a use case for that.
+ */
+const char *
+pgaio_io_get_op_name(PgAioHandle *ioh)
+{
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+
+	switch (ioh->op)
+	{
+		case PGAIO_OP_INVALID:
+			return "invalid";
+		case PGAIO_OP_READV:
+			return "read";
+		case PGAIO_OP_WRITEV:
+			return "write";
+	}
+
+	return NULL;				/* silence compiler */
+}
diff --git a/src/backend/storage/aio/aio_target.c b/src/backend/storage/aio/aio_target.c
new file mode 100644
index 00000000000..15428968e58
--- /dev/null
+++ b/src/backend/storage/aio/aio_target.c
@@ -0,0 +1,108 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_target.c
+ *	  AIO - Functionality related to executing IO for different targets
+ *
+ * XXX Write me
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/aio/aio_target.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+
+
+/*
+ * Registry for entities that can be the target of AIO.
+ *
+ * To support executing using worker processes, the file descriptor for an IO
+ * may need to be reopened in a different process. This is done via the
+ * PgAioTargetInfo.reopen callback.
+ */
+static const PgAioTargetInfo *pgaio_target_info[] = {
+	[PGAIO_TID_INVALID] = &(PgAioTargetInfo) {
+		.name = "invalid",
+	},
+};
+
+
+
+bool
+pgaio_io_has_target(PgAioHandle *ioh)
+{
+	return ioh->target != PGAIO_TID_INVALID;
+}
+
+/*
+ * Return the name for the target associated with the IO. Mostly useful for
+ * debugging/logging.
+ */
+const char *
+pgaio_io_get_target_name(PgAioHandle *ioh)
+{
+	Assert(ioh->target >= 0 && ioh->target < PGAIO_TID_COUNT);
+
+	return pgaio_target_info[ioh->target]->name;
+}
+
+/*
+ * Assign a target to the IO.
+ *
+ * This has to be called exactly once before pgaio_io_prep_*() is called.
+ */
+void
+pgaio_io_set_target(PgAioHandle *ioh, PgAioTargetID targetid)
+{
+	Assert(ioh->state == PGAIO_HS_HANDED_OUT);
+	Assert(ioh->target == PGAIO_TID_INVALID);
+
+	ioh->target = targetid;
+}
+
+PgAioTargetData *
+pgaio_io_get_target_data(PgAioHandle *ioh)
+{
+	return &ioh->target_data;
+}
+
+/*
+ * Return a stringified description of the IO's target.
+ *
+ * The string is localized and allocated in the current memory context.
+ */
+char *
+pgaio_io_get_target_description(PgAioHandle *ioh)
+{
+	return pgaio_target_info[ioh->target]->describe_identity(&ioh->target_data);
+}
+
+/*
+ * Internal: Check if pgaio_io_reopen() is available for the IO.
+ */
+bool
+pgaio_io_can_reopen(PgAioHandle *ioh)
+{
+	return pgaio_target_info[ioh->target]->reopen != NULL;
+}
+
+/*
+ * Internal: Before executing an IO outside of the context of the process the
+ * IO has been prepared in, the file descriptor has to be reopened - any FD
+ * referenced in the IO itself, won't be valid in the separate process.
+ */
+void
+pgaio_io_reopen(PgAioHandle *ioh)
+{
+	Assert(ioh->target >= 0 && ioh->target < PGAIO_TID_COUNT);
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+
+	pgaio_target_info[ioh->target]->reopen(ioh);
+}
diff --git a/src/backend/storage/aio/meson.build b/src/backend/storage/aio/meson.build
index c822fd4ddf7..2c26089d52e 100644
--- a/src/backend/storage/aio/meson.build
+++ b/src/backend/storage/aio/meson.build
@@ -2,6 +2,10 @@
 
 backend_sources += files(
   'aio.c',
+  'aio_callback.c',
   'aio_init.c',
+  'aio_io.c',
+  'aio_target.c',
+  'method_sync.c',
   'read_stream.c',
 )
diff --git a/src/backend/storage/aio/method_sync.c b/src/backend/storage/aio/method_sync.c
new file mode 100644
index 00000000000..43f9c8bd0b3
--- /dev/null
+++ b/src/backend/storage/aio/method_sync.c
@@ -0,0 +1,47 @@
+/*-------------------------------------------------------------------------
+ *
+ * method_sync.c
+ *    AIO - perform "AIO" by executing it synchronously
+ *
+ * This method is mainly to check if AIO use causes regressions. Other IO
+ * methods might also fall back to the synchronous method for functionality
+ * they cannot provide.
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/storage/aio/method_sync.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+
+static bool pgaio_sync_needs_synchronous_execution(PgAioHandle *ioh);
+static int	pgaio_sync_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
+
+
+const IoMethodOps pgaio_sync_ops = {
+	.needs_synchronous_execution = pgaio_sync_needs_synchronous_execution,
+	.submit = pgaio_sync_submit,
+};
+
+
+
+static bool
+pgaio_sync_needs_synchronous_execution(PgAioHandle *ioh)
+{
+	return true;
+}
+
+static int
+pgaio_sync_submit(uint16 num_staged_ios, PgAioHandle **staged_ios)
+{
+	elog(ERROR, "should be unreachable");
+
+	return 0;
+}
diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt
index e199f071628..b5d3dcbf1e9 100644
--- a/src/backend/utils/activity/wait_event_names.txt
+++ b/src/backend/utils/activity/wait_event_names.txt
@@ -191,6 +191,9 @@ ABI_compatibility:
 
 Section: ClassName - WaitEventIO
 
+AIO_COMPLETION	"Waiting for completion callback."
+AIO_DRAIN	"Waiting for IOs to finish."
+AIO_SUBMIT	"Waiting for AIO submission."
 BASEBACKUP_READ	"Waiting for base backup to read from a file."
 BASEBACKUP_SYNC	"Waiting for data written by a base backup to reach durable storage."
 BASEBACKUP_WRITE	"Waiting for base backup to write to a file."
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index ac5ca4a765e..e5d852b5ee6 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -47,6 +47,8 @@
 
 #include "common/hashfn.h"
 #include "common/int.h"
+#include "lib/ilist.h"
+#include "storage/aio.h"
 #include "storage/ipc.h"
 #include "storage/predicate.h"
 #include "storage/proc.h"
@@ -155,6 +157,12 @@ struct ResourceOwnerData
 
 	/* The local locks cache. */
 	LOCALLOCK  *locks[MAX_RESOWNER_LOCKS];	/* list of owned locks */
+
+	/*
+	 * AIO handles need to be registered in critical sections and therefore
+	 * cannot use the normal ResourceElem mechanism.
+	 */
+	dlist_head	aio_handles;
 };
 
 
@@ -425,6 +433,8 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
 		parent->firstchild = owner;
 	}
 
+	dlist_init(&owner->aio_handles);
+
 	return owner;
 }
 
@@ -725,6 +735,14 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
 		 * so issue warnings.  In the abort case, just clean up quietly.
 		 */
 		ResourceOwnerReleaseAll(owner, phase, isCommit);
+
+		/* XXX: Could probably be a later phase? */
+		while (!dlist_is_empty(&owner->aio_handles))
+		{
+			dlist_node *node = dlist_head_node(&owner->aio_handles);
+
+			pgaio_io_release_resowner(node, !isCommit);
+		}
 	}
 	else if (phase == RESOURCE_RELEASE_LOCKS)
 	{
@@ -1082,3 +1100,15 @@ ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
 	elog(ERROR, "lock reference %p is not owned by resource owner %s",
 		 locallock, owner->name);
 }
+
+void
+ResourceOwnerRememberAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
+{
+	dlist_push_tail(&owner->aio_handles, ioh_node);
+}
+
+void
+ResourceOwnerForgetAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
+{
+	dlist_delete_from(&owner->aio_handles, ioh_node);
+}
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 3bec090428d..c7f34559b1b 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1267,6 +1267,7 @@ InvalMessageArray
 InvalidationInfo
 InvalidationMsgsGroup
 IoMethod
+IoMethodOps
 IpcMemoryId
 IpcMemoryKey
 IpcMemoryState
@@ -2105,6 +2106,26 @@ Permutation
 PermutationStep
 PermutationStepBlocker
 PermutationStepBlockerType
+PgAioBackend
+PgAioCtl
+PgAioHandle
+PgAioHandleCallbackComplete
+PgAioHandleCallbackID
+PgAioHandleCallbackReport
+PgAioHandleCallbackStage
+PgAioHandleCallbacks
+PgAioHandleCallbacksEntry
+PgAioHandleFlags
+PgAioHandleState
+PgAioOp
+PgAioOpData
+PgAioResult
+PgAioResultStatus
+PgAioReturn
+PgAioTargetData
+PgAioTargetID
+PgAioTargetInfo
+PgAioWaitRef
 PgArchData
 PgBackendGSSStatus
 PgBackendSSLStatus
-- 
2.48.1.76.g4e746b1a31.dirty

