From 3a0dc2893c757be7cbde5d1cd1f7801de073f943 Mon Sep 17 00:00:00 2001
From: David Rowley
Date: Fri, 26 Aug 2022 14:42:23 +1200
Subject: [PATCH v1 3/3] Be smarter about freeing tuples during tuplesorts

During dumptuples() the call to writetuple() would pfree any non-null
tuple.  This was quite wasteful as this happens just before we perform
a reset of the context which stores all of those tuples.

Here we adjust the writetuple signature so we can tell it exactly what
we need done in regards to freeing the tuple and/or adjusting the
memory accounting.  In dumptuples() we must still account for the
memory that's been released.

Also, since writetuple() is only called twice, let's make it static
inline so that the compiler inlines the call and removes the branching
to check the bool arguments.
---
 src/backend/utils/sort/tuplesort.c | 44 ++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index d1c2f5f58f..519c7424f7 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -452,8 +452,9 @@ struct Sharedsort
 
 
 static void tuplesort_begin_batch(Tuplesortstate *state);
-static void writetuple(Tuplesortstate *state, LogicalTape *tape,
-					   SortTuple *stup);
+static inline void writetuple(Tuplesortstate *state, LogicalTape *tape,
+							  SortTuple *stup, bool freetup,
+							  bool adjust_accounting);
 static bool consider_abort_common(Tuplesortstate *state);
 static void inittapes(Tuplesortstate *state, bool mergeruns);
 static void inittapestate(Tuplesortstate *state, int maxTapes);
@@ -1339,18 +1340,22 @@ tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbre
 }
 
 /*
- * Write 'stup' out onto 'tape' and, unless using the slab allocator, pfree
- * stup's tuple and adjust the memory accounting accordingly.
+ * Write 'stup' out onto 'tape' and optionally free stup->tuple and optionally
+ * adjust the memory accounting.
  */
-static void
-writetuple(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
+static inline void
+writetuple(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup,
+		   bool freetup, bool adjust_accounting)
 {
 	state->base.writetup(state, tape, stup);
 
-	if (!state->slabAllocatorUsed && stup->tuple)
+	if (stup->tuple != NULL)
 	{
-		FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
-		pfree(stup->tuple);
+		if (adjust_accounting)
+			FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
+
+		if (freetup)
+			pfree(stup->tuple);
 	}
 }
 
@@ -2251,6 +2256,8 @@ mergeonerun(Tuplesortstate *state)
 	int			srcTapeIndex;
 	LogicalTape *srcTape;
 
+	Assert(state->slabAllocatorUsed);
+
 	/*
 	 * Start the merge by loading one tuple from each active source tape into
 	 * the heap.
@@ -2269,7 +2276,7 @@ mergeonerun(Tuplesortstate *state)
 		/* write the tuple to destTape */
 		srcTapeIndex = state->memtuples[0].srctape;
 		srcTape = state->inputTapes[srcTapeIndex];
-		writetuple(state, state->destTape, &state->memtuples[0]);
+		writetuple(state, state->destTape, &state->memtuples[0], false, false);
 
 		/* recycle the slot of the tuple we just wrote out, for the next read */
 		if (state->memtuples[0].tuple)
@@ -2415,16 +2422,23 @@ dumptuples(Tuplesortstate *state, bool alltuples)
 	memtupwrite = state->memtupcount;
 	for (i = 0; i < memtupwrite; i++)
 	{
-		writetuple(state, state->destTape, &state->memtuples[i]);
+		/*
+		 * No need to free the tuple as we're resetting the tuplecontext
+		 * below.  We do need to adjust the memory accounting for the tuple,
+		 * however.
+		 */
+		writetuple(state, state->destTape, &state->memtuples[i], false, true);
 		state->memtupcount--;
 	}
 
 	/*
-	 * Reset tuple memory.  We've freed all of the tuples that we previously
-	 * allocated.  It's important to avoid fragmentation when there is a stark
-	 * change in the sizes of incoming tuples.  Fragmentation due to
+	 * Reset tuple memory.  This saves us from having to pfree() each
+	 * individual tuple and helps to avoid memory fragmentation when there is
+	 * a stark change in the sizes of incoming tuples.  Fragmentation due to
 	 * AllocSetFree's bucketing by size class might be particularly bad if
-	 * this step wasn't taken.
+	 * this step wasn't taken.  We may be using the Generation allocator,
+	 * which does not suffer from this issue, however it does not seem worth
+	 * special handling for that.
 	 */
 	MemoryContextReset(state->base.tuplecontext);
 
-- 
2.34.1
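
A note for readers who don't have tuplesort.c in front of them: the saving the
patch is after can be shown with a small standalone sketch.  The program below
is not PostgreSQL code; it uses a hypothetical toy arena allocator in place of
the tuplecontext memory context and a plain counter in place of the FREEMEM()
accounting, so the names Arena, arena_alloc, arena_reset and "accounted" are
made up for illustration only.  It shows the pattern dumptuples() now follows:
keep the per-tuple accounting adjustment, skip the per-tuple free, and let one
reset of the context release all of the tuple memory.

#include <stdio.h>
#include <stdlib.h>

/* Toy bump allocator standing in for a palloc memory context. */
typedef struct Arena
{
	char	   *buf;
	size_t		used;
	size_t		size;
} Arena;

static void *
arena_alloc(Arena *a, size_t len)
{
	void	   *p;

	if (a->used + len > a->size)
		return NULL;			/* toy allocator: no block chaining */
	p = a->buf + a->used;
	a->used += len;
	return p;
}

static void
arena_reset(Arena *a)
{
	/* release every allocation at once, like MemoryContextReset() */
	a->used = 0;
}

int
main(void)
{
	Arena		arena = {malloc(1024 * 1024), 0, 1024 * 1024};
	size_t		accounted = 0;	/* stands in for the FREEMEM()/USEMEM() tally */
	void	   *tuples[1000];
	int			ntuples = 1000;

	/* "puttuple": allocate each tuple in the context, grow the accounting */
	for (int i = 0; i < ntuples; i++)
	{
		size_t		len = 64 + (size_t) (i % 5) * 16;

		tuples[i] = arena_alloc(&arena, len);
		accounted += len;
	}

	/*
	 * "dumptuples": write each tuple out (omitted here), adjust the
	 * accounting per tuple, but do not free tuples one by one; the single
	 * context reset below releases them all.
	 */
	for (int i = 0; i < ntuples; i++)
	{
		size_t		len = 64 + (size_t) (i % 5) * 16;

		accounted -= len;		/* the accounting adjustment still happens */
		(void) tuples[i];		/* no per-tuple free */
	}
	arena_reset(&arena);		/* one cheap reset instead of 1000 frees */

	printf("accounted bytes after dump: %zu (arena used: %zu)\n",
		   accounted, arena.used);
	free(arena.buf);
	return 0;
}

In the patch itself the same per-call-site choice is expressed through the new
bool arguments: dumptuples() passes freetup = false, adjust_accounting = true
because the context reset below the loop does the freeing, while mergeonerun()
passes false for both since the slab allocator owns that memory (hence the new
Assert on state->slabAllocatorUsed).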