From bd10f56beb365e3cbfadb87d27f8aeb4b33f880e Mon Sep 17 00:00:00 2001
From: Alvaro Herrera
Date: Fri, 20 Jan 2017 18:04:51 -0300
Subject: [PATCH] Prefetch blocks during lazy vacuum's truncation scan

The truncation scan can be sped up on rotating media by prefetching
blocks in forward direction, because that is a cue for the operating
system's readahead to kick in, so that by the time we request those
blocks, they are already in memory.

Author: Claudio Freire
Discussion: https://postgr.es/m/CAGTBQpa6NFGO_6g_y_7zQx8L9GcHDSQKYdo1tGuh791z6PYgEg@mail.gmail.com
---
 src/backend/commands/vacuumlazy.c | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index a2999b3..e676072 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -98,6 +98,12 @@
  */
 #define SKIP_PAGES_THRESHOLD	((BlockNumber) 32)
 
+/*
+ * Size of the prefetch window for lazy vacuum backwards truncation scan.
+ * Needs to be a power of 2.
+ */
+#define PREFETCH_SIZE			32
+
 typedef struct LVRelStats
 {
 	/* hasindex = true means two-pass strategy; false means one-pass */
@@ -1825,14 +1831,24 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 static BlockNumber
 count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 {
-	BlockNumber blkno;
+	BlockNumber blkno,
+				prefetchBlkno;
 	instr_time	starttime;
 
 	/* Initialize the starttime if we check for conflicting lock requests */
 	INSTR_TIME_SET_CURRENT(starttime);
 
-	/* Strange coding of loop control is needed because blkno is unsigned */
+	/*
+	 * Start checking blocks at what we believe relation end to be and move
+	 * backwards.  (Strange coding of loop control is needed because blkno is
+	 * unsigned.)  To make it a bit faster, we prefetch a bunch of blocks at a
+	 * time in forward direction, so that OS-level readahead can kick in to
+	 * speed this up.
+	 */
 	blkno = vacrelstats->rel_pages;
+	prefetchBlkno = blkno & ~(PREFETCH_SIZE - 1);
+	prefetchBlkno =
+		(prefetchBlkno > PREFETCH_SIZE) ? prefetchBlkno - PREFETCH_SIZE : 0;
 	while (blkno > vacrelstats->nonempty_pages)
 	{
 		Buffer		buf;
@@ -1882,6 +1898,22 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 
 		blkno--;
 
+		/* If we haven't prefetched this lot yet, do so now. */
+		if (blkno <= prefetchBlkno)
+		{
+			BlockNumber pblkno;
+
+			if (prefetchBlkno >= PREFETCH_SIZE)
+				prefetchBlkno -= PREFETCH_SIZE;
+			else
+				prefetchBlkno = 0;
+			for (pblkno = prefetchBlkno; pblkno < blkno; pblkno++)
+			{
+				PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
+				CHECK_FOR_INTERRUPTS();
+			}
+		}
+
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL,
 								 vac_strategy);
 
-- 
2.1.4
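
A note on the window arithmetic, with a minimal standalone sketch. This
is not part of the patch: the relation size, the printf tracing, and the
main() harness are invented for illustration, standing in for the
PrefetchBuffer()/ReadBufferExtended() calls above. The point it
demonstrates is that PREFETCH_SIZE being a power of 2 lets a single mask
round blkno down to a window boundary, and that each window's blocks are
issued in ascending order, the forward pattern OS readahead latches onto.

/*
 * Dry run of the prefetch-window logic from count_nondeletable_pages(),
 * outside PostgreSQL.  BlockNumber is assumed to be a 32-bit unsigned
 * integer, as in the real code.
 */
#include <stdio.h>

typedef unsigned int BlockNumber;

#define PREFETCH_SIZE 32		/* must be a power of 2 */

int
main(void)
{
	BlockNumber rel_pages = 100;	/* assumed relation size, in blocks */
	BlockNumber nonempty_pages = 0; /* the backward scan stops here */
	BlockNumber blkno;
	BlockNumber prefetchBlkno;

	/*
	 * Same initialization as the patch: mask blkno down to a multiple of
	 * PREFETCH_SIZE, then step back one more window (clamped at zero).
	 */
	blkno = rel_pages;
	prefetchBlkno = blkno & ~(PREFETCH_SIZE - 1);
	prefetchBlkno =
		(prefetchBlkno > PREFETCH_SIZE) ? prefetchBlkno - PREFETCH_SIZE : 0;

	while (blkno > nonempty_pages)
	{
		blkno--;

		/* Scan reached the current window: slide it back and issue it. */
		if (blkno <= prefetchBlkno)
		{
			BlockNumber pblkno;

			if (prefetchBlkno >= PREFETCH_SIZE)
				prefetchBlkno -= PREFETCH_SIZE;
			else
				prefetchBlkno = 0;

			/* Ascending order; the patch calls PrefetchBuffer() here. */
			for (pblkno = prefetchBlkno; pblkno < blkno; pblkno++)
				printf("prefetch block %u\n", pblkno);
		}

		/* The patch reads block blkno with ReadBufferExtended() here. */
	}

	return 0;
}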
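Compiled as-is, the sketch traces one window at a time: with a 100-block
relation it requests blocks 32..63 when the backward scan reaches block
64, then blocks 0..31 when it reaches block 32, so each bunch of
prefetches is issued just before the scan descends into it.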