From 7993cede8939cad9172867ccc690a44ea25d1ad6 Mon Sep 17 00:00:00 2001
From: Thomas Munro
Date: Fri, 29 Mar 2024 00:22:53 +1300
Subject: [PATCH] fixup: respect io_combine_limit in bulk_write.c

---
 src/backend/storage/smgr/bulk_write.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/src/backend/storage/smgr/bulk_write.c b/src/backend/storage/smgr/bulk_write.c
index 848c3054f5..612a9a23b3 100644
--- a/src/backend/storage/smgr/bulk_write.c
+++ b/src/backend/storage/smgr/bulk_write.c
@@ -45,12 +45,6 @@
 
 #define MAX_PENDING_WRITES XLR_MAX_BLOCK_ID
 
-/*
- * How many blocks to send to smgrwritev() at a time. Arbitrary value for
- * now.
- */
-#define MAX_BLOCKS_PER_WRITE ((128 * 1024) / BLCKSZ)
-
 static const PGIOAlignedBlock zero_buffer = {{0}};	/* worth BLCKSZ */
 
 typedef struct PendingWrite
@@ -232,7 +226,7 @@ smgr_bulk_flush(BulkWriteState *bulkstate)
 	for (int i = 0; i < npending; i++)
 	{
 		Page		page;
-		const void *pages[MAX_BLOCKS_PER_WRITE];
+		const void *pages[MAX_IO_COMBINE_LIMIT];
 		BlockNumber blkno;
 		int			nblocks;
 		int			max_nblocks;
@@ -266,14 +260,14 @@ smgr_bulk_flush(BulkWriteState *bulkstate)
 			 * We're overwriting. Clamp at the existing size, because we
 			 * can't mix writing and extending in a single operation.
 			 */
-			max_nblocks = Min(lengthof(pages),
+			max_nblocks = Min(io_combine_limit,
 							  bulkstate->pages_written - blkno);
 		}
 		else
 		{
 			/* We're extending. */
 			Assert(blkno == bulkstate->pages_written);
-			max_nblocks = lengthof(pages);
+			max_nblocks = io_combine_limit;
 		}
 
 		/* Find as many consecutive blocks as we can. */
-- 
2.39.3 (Apple Git-146)
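
For readers following along, the pattern the hunks above move to is: size the on-stack pointer array for the compile-time ceiling (MAX_IO_COMBINE_LIMIT), but clamp how many blocks actually go into each write by the run-time io_combine_limit setting. The following standalone sketch illustrates just that pattern; it is not the PostgreSQL source, and the concrete values, the PendingWrite/flush_pending names, and the printf stand-in for the actual vectored write are all illustrative assumptions.

/*
 * Standalone sketch (not bulk_write.c itself): batch consecutive blocks,
 * clamping each write to a run-time limit while the array is sized for
 * the compile-time maximum.
 */
#include <stdio.h>

#define BLCKSZ 8192
#define MAX_IO_COMBINE_LIMIT 32		/* compile-time ceiling (illustrative) */
#define Min(a, b) ((a) < (b) ? (a) : (b))

static int	io_combine_limit = 16;	/* run-time setting, never above the ceiling */

typedef struct
{
	unsigned	blkno;				/* target block number, sorted ascending */
	const void *data;				/* BLCKSZ bytes of page data */
} PendingWrite;

static void
flush_pending(const PendingWrite *pending, int npending, unsigned pages_written)
{
	for (int i = 0; i < npending;)
	{
		/* Array sized for the worst case; usage clamped below. */
		const void *pages[MAX_IO_COMBINE_LIMIT];
		unsigned	blkno = pending[i].blkno;
		int			max_nblocks;
		int			nblocks = 0;

		if (blkno < pages_written)
		{
			/* Overwriting: can't mix overwriting and extending in one write. */
			max_nblocks = Min(io_combine_limit, (int) (pages_written - blkno));
		}
		else
		{
			/* Extending: only the run-time limit applies. */
			max_nblocks = io_combine_limit;
		}

		/* Collect as many consecutive blocks as the clamp allows. */
		while (i < npending &&
			   nblocks < max_nblocks &&
			   pending[i].blkno == blkno + (unsigned) nblocks)
			pages[nblocks++] = pending[i++].data;

		printf("write %d block(s) starting at block %u (%d bytes)\n",
			   nblocks, blkno, nblocks * BLCKSZ);
		(void) pages;			/* a real caller would hand these to a vectored write */
	}
}

int
main(void)
{
	static char buf[8][BLCKSZ];
	PendingWrite pending[8];

	for (int i = 0; i < 8; i++)
	{
		pending[i].blkno = (unsigned) i;
		pending[i].data = buf[i];
	}

	/* Pretend 3 pages already exist: one clamped overwrite batch, then extending. */
	flush_pending(pending, 8, 3);
	return 0;
}

Keeping the array at the compile-time maximum means the stack footprint stays fixed, while the per-write batch size can follow the configurable limit without any rebuild.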