From 9fb610126c607a6ced971d774127f2255ad0bbf8 Mon Sep 17 00:00:00 2001
From: "Andrey V. Lepikhov"
Date: Wed, 6 Dec 2023 10:52:05 +0700
Subject: [PATCH] Bugfix: guard the total number of hash table buckets.

The bucket count must be capped at the maximum number that fits into the
largest possible DSA allocation (MaxAllocSize). Rounding that cap down to a
power of two keeps the later pg_nextpower2_32() call from pushing the bucket
array past MaxAllocSize.
---
 src/backend/executor/nodeHash.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index e72f0986c2..fdba7c85d3 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1155,6 +1155,9 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					double		dtuples;
 					double		dbuckets;
 					int			new_nbuckets;
+					const uint32 max_buckets =
+						pg_prevpower2_32((uint32)
+										 (MaxAllocSize / sizeof(dsa_pointer_atomic)));
 
 					/*
 					 * We probably also need a smaller bucket array.  How many
@@ -1168,8 +1171,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					 */
 					dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
 					dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
-					dbuckets = Min(dbuckets,
-								   MaxAllocSize / sizeof(dsa_pointer_atomic));
+					dbuckets = Min(dbuckets, max_buckets);
 					new_nbuckets = (int) dbuckets;
 					new_nbuckets = Max(new_nbuckets, 1024);
 					new_nbuckets = pg_nextpower2_32(new_nbuckets);
-- 
2.43.0
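
Below is a standalone sketch (not part of the patch) of why the cap has to be
rounded down to a power of two before pg_nextpower2_32() is applied.
MaxAllocSize = 0x3fffffff and an 8-byte dsa_pointer_atomic are assumptions
matching a typical 64-bit PostgreSQL build; next_pow2()/prev_pow2() are
simplified stand-ins for pg_nextpower2_32()/pg_prevpower2_32().

#include <stdint.h>
#include <stdio.h>

#define MaxAllocSize ((size_t) 0x3fffffff)	/* 1 GB - 1, as in memutils.h */
#define SIZEOF_DSA_POINTER_ATOMIC 8			/* assumed 8-byte atomic pointer */

/* smallest power of two >= v (stand-in for pg_nextpower2_32) */
static uint32_t
next_pow2(uint32_t v)
{
	uint32_t	r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

/* largest power of two <= v (stand-in for pg_prevpower2_32) */
static uint32_t
prev_pow2(uint32_t v)
{
	uint32_t	r = 1;

	while ((r << 1) <= v && (r << 1) != 0)
		r <<= 1;
	return r;
}

int
main(void)
{
	/* the raw (non-power-of-two) cap used before the patch */
	uint32_t	raw_cap = (uint32_t) (MaxAllocSize / SIZEOF_DSA_POINTER_ATOMIC);

	/* old behaviour: clamp to raw_cap, then round up to a power of two */
	uint32_t	old_nbuckets = next_pow2(raw_cap);

	/* patched behaviour: clamp to a power-of-two cap, then round up */
	uint32_t	new_nbuckets = next_pow2(prev_pow2(raw_cap));

	printf("old: %u buckets -> %zu bytes (MaxAllocSize = %zu)\n",
		   old_nbuckets,
		   (size_t) old_nbuckets * SIZEOF_DSA_POINTER_ATOMIC,
		   MaxAllocSize);
	printf("new: %u buckets -> %zu bytes\n",
		   new_nbuckets,
		   (size_t) new_nbuckets * SIZEOF_DSA_POINTER_ATOMIC);
	return 0;
}

With these assumed numbers, the unpatched path rounds 134217727 up to
134217728 buckets, a 1073741824-byte bucket array that exceeds MaxAllocSize
by one byte, while the patched path stays at 67108864 buckets (536870912
bytes), so the subsequent DSA allocation can never exceed the limit.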