From 6238657ddb8c9e63d28a1a96712278f548d3292c Mon Sep 17 00:00:00 2001 From: Dmitrii Dolgov <9erthalion6@gmail.com> Date: Tue, 17 Jun 2025 11:47:04 +0200 Subject: [PATCH v5 05/10] Address space reservation for shared memory Currently the shared memory layout is designed to pack everything tight together, leaving no space between mappings for resizing. Here is how it looks for one mapping in /proc/$PID/maps, /dev/zero represents the anonymous shared memory we talk about: 00400000-00490000 /path/bin/postgres ... 012d9000-0133e000 [heap] 7f443a800000-7f470a800000 /dev/zero (deleted) 7f470a800000-7f471831d000 /usr/lib/locale/locale-archive 7f4718400000-7f4718401000 /usr/lib64/libstdc++.so.6.0.34 ... Make the layout more dynamic via splitting every shared memory segment into two parts: * An anonymous file, which actually contains shared memory content. Such an anonymous file is created via memfd_create, it lives in memory, behaves like a regular file and is semantically equivalent to anonymous memory allocated via mmap with MAP_ANONYMOUS. * A reservation mapping, whose size is much larger than the required shared segment size. This mapping is created with flags PROT_NONE (which makes sure the reserved space is not used), and MAP_NORESERVE (to not count the reserved space against memory limits). The anonymous file is mapped into this reservation mapping. The resulting layout looks like this: 00400000-00490000 /path/bin/postgres ... 3f526000-3f590000 rw-p [heap] 7fbd827fe000-7fbd8bdde000 rw-s /memfd:main (deleted) -- anon file 7fbd8bdde000-7fbe82800000 ---s /memfd:main (deleted) -- reservation 7fbe82800000-7fbe90670000 r--p /usr/lib/locale/locale-archive 7fbe90800000-7fbe90941000 r-xp /usr/lib64/libstdc++.so.6.0.34 To resize a shared memory segment in this layout it's possible to use ftruncate on the anonymous file, adjusting access permissions on the reserved space as needed. This approach also does not impact the actual memory usage as reported by the kernel. 
Here is the output of /proc/$PID/status for the master version with shared_buffers = 128 MB: // Peak virtual memory size, which is described as total pages // mapped in mm_struct. It corresponds to the mapped reserved space // and is the only number that grows with it. VmPeak: 2043192 kB // Size of memory portions. It contains RssAnon + RssFile + RssShmem VmRSS: 22908 kB // Size of resident anonymous memory RssAnon: 768 kB // Size of resident file mappings RssFile: 10364 kB // Size of resident shmem memory (includes SysV shm, mapping of tmpfs and // shared anonymous mappings) RssShmem: 11776 kB Here is the same for the patch when reserving 20GB of space: VmPeak: 21255824 kB VmRSS: 25020 kB RssAnon: 768 kB RssFile: 10812 kB RssShmem: 13440 kB Cgroup v2 doesn't have any problems with that either. To verify, a new cgroup was created with the memory limit 256 MB, then PostgreSQL was launched within this cgroup with shared_buffers = 128 MB: $ cd /sys/fs/cgroup $ mkdir postgres $ cd postgres $ echo 268435456 > memory.max $ echo $MASTER_PID_SHELL > cgroup.procs # postgres from the master branch has been successfully launched # from that shell $ cat memory.current 17465344 (~16.6 MB) # stop postgres $ echo $PATCH_PID_SHELL > cgroup.procs # postgres from the patch has been successfully launched from that shell $ cat memory.current 20770816 (~19.8 MB) To control the amount of space reserved a new GUC max_available_memory is introduced. Ideally it should be based on the maximum available memory, hence the name. There are also a few unrelated advantages of using anon files: * We've got a file descriptor, which could be used for regular file operations (modification, truncation, you name it). * The file could be given a name, which improves readability when it comes to process maps. * By default, Linux will not add file-backed shared mappings into a core dump, making it more convenient to work with them in PostgreSQL: no more huge dumps to process. 
The downside is that memfd_create is Linux specific. --- src/backend/port/sysv_shmem.c | 290 ++++++++++++++++++++++------ src/backend/port/win32_shmem.c | 2 +- src/backend/storage/ipc/ipci.c | 5 +- src/backend/storage/ipc/shmem.c | 2 +- src/backend/utils/init/globals.c | 1 + src/backend/utils/misc/guc_tables.c | 14 ++ src/include/miscadmin.h | 1 + src/include/portability/mem.h | 2 +- src/include/storage/pg_shmem.h | 5 +- 9 files changed, 262 insertions(+), 60 deletions(-) diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c index 56af0231d24..363ddfd1fca 100644 --- a/src/backend/port/sysv_shmem.c +++ b/src/backend/port/sysv_shmem.c @@ -97,10 +97,12 @@ void *UsedShmemSegAddr = NULL; typedef struct AnonymousMapping { int shmem_segment; - Size shmem_size; /* Size of the mapping */ + Size shmem_size; /* Size of the actually used memory */ + Size shmem_reserved; /* Size of the reserved mapping */ Pointer shmem; /* Pointer to the start of the mapped memory */ Pointer seg_addr; /* SysV shared memory for the header */ unsigned long seg_id; /* IPC key */ + int segment_fd; /* fd for the backing anon file */ } AnonymousMapping; static AnonymousMapping Mappings[ANON_MAPPINGS]; @@ -108,6 +110,49 @@ static AnonymousMapping Mappings[ANON_MAPPINGS]; /* Keeps track of used mapping segments */ static int next_free_segment = 0; +/* + * Anonymous mapping layout we use looks like this: + * + * 00400000-00c2a000 r-xp /bin/postgres + * ... + * 3f526000-3f590000 rw-p [heap] + * 7fbd827fe000-7fbd8bdde000 rw-s /memfd:main (deleted) + * 7fbd8bdde000-7fbe82800000 ---s /memfd:main (deleted) + * 7fbe82800000-7fbe90670000 r--p /usr/lib/locale/locale-archive + * 7fbe90800000-7fbe90941000 r-xp /usr/lib64/libstdc++.so.6.0.34 + * ... + * + * We need to place shared memory mappings in such a way, that there will be + * gaps between them in the address space. 
Those gaps have to be large enough + * to resize the mapping up to a certain size, without counting towards the total + * memory consumption. + * + * To achieve this, for each shared memory segment we first create an anonymous + * file of specified size using memfd_create, which will accommodate actual + * shared memory mapping content. It is represented by the first /memfd:main + * with rw permissions. Then we create a mapping for this file using mmap, with + * size much larger than required and flags PROT_NONE (allows to make sure the + * reserved space will not be used) and MAP_NORESERVE (prevents the space from + * being counted against memory limits). The mapping serves as an address space + * reservation, into which the shared memory segment can be extended and is + * represented by the second /memfd:main with no permissions. + * + * The reserved space for each segment is calculated as a fraction of the total + * reserved space (MaxAvailableMemory), as specified in the SHMEM_RESIZE_RATIO + * array. + */ +static double SHMEM_RESIZE_RATIO[1] = { + 1.0, /* MAIN_SHMEM_SLOT */ +}; + +/* + * Flag telling that we have decided to use huge pages. + * + * XXX: It's possible to use GetConfigOption("huge_pages_status", false, false) + * instead, but it feels like an overkill. + */ +static bool huge_pages_on = false; + static void *InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size); static void IpcMemoryDetach(int status, Datum shmaddr); static void IpcMemoryDelete(int status, Datum shmId); @@ -503,19 +548,20 @@ PGSharedMemoryAttach(IpcMemoryId shmId, * hugepage sizes, we might want to think about more invasive strategies, * such as increasing shared_buffers to absorb the extra space. * - * Returns the (real, assumed or config provided) page size into - * *hugepagesize, and the hugepage-related mmap flags to use into - * *mmap_flags if requested by the caller. If huge pages are not supported, - * *hugepagesize and *mmap_flags are set to 0. 
+ * Returns the (real, assumed or config provided) page size into *hugepagesize, + * the hugepage-related mmap and memfd flags to use into *mmap_flags and + * *memfd_flags if requested by the caller. If huge pages are not supported, + * *hugepagesize, *mmap_flags and *memfd_flags are set to 0. */ void -GetHugePageSize(Size *hugepagesize, int *mmap_flags) +GetHugePageSize(Size *hugepagesize, int *mmap_flags, int *memfd_flags) { #ifdef MAP_HUGETLB Size default_hugepagesize = 0; Size hugepagesize_local = 0; int mmap_flags_local = 0; + int memfd_flags_local = 0; /* * System-dependent code to find out the default huge page size. @@ -574,6 +620,7 @@ GetHugePageSize(Size *hugepagesize, int *mmap_flags) } mmap_flags_local = MAP_HUGETLB; + memfd_flags_local = MFD_HUGETLB; /* * On recent enough Linux, also include the explicit page size, if @@ -584,7 +631,16 @@ { int shift = pg_ceil_log2_64(hugepagesize_local); - mmap_flags_local |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT; + mmap_flags_local |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT; + } +#endif + +#if defined(MFD_HUGE_MASK) && defined(MFD_HUGE_SHIFT) + if (hugepagesize_local != default_hugepagesize) + { + int shift = pg_ceil_log2_64(hugepagesize_local); + + memfd_flags_local |= (shift & MFD_HUGE_MASK) << MFD_HUGE_SHIFT; } #endif @@ -593,6 +649,8 @@ *mmap_flags = mmap_flags_local; if (hugepagesize) *hugepagesize = hugepagesize_local; + if (memfd_flags) + *memfd_flags = memfd_flags_local; #else @@ -600,6 +658,8 @@ *hugepagesize = 0; if (mmap_flags) *mmap_flags = 0; + if (memfd_flags) + *memfd_flags = 0; #endif /* MAP_HUGETLB */ } @@ -625,72 +685,90 @@ * Creates an anonymous mmap()ed shared memory segment. 
* * This function will modify mapping size to the actual size of the allocation, - * if it ends up allocating a segment that is larger than requested. + * if it ends up allocating a segment that is larger than requested. If needed, + * it also rounds up the mapping reserved size to be a multiple of huge page + * size. + * + * Note that we do not fallback from huge pages to regular pages in this + * function, this decision was already made in ReserveAnonymousMemory and we + * stick to it. */ static void CreateAnonymousSegment(AnonymousMapping *mapping) { Size allocsize = mapping->shmem_size; void *ptr = MAP_FAILED; - int mmap_errno = 0; + int save_errno = 0; + int mmap_flags = PG_MMAP_FLAGS, memfd_flags = 0; + + elog(DEBUG1, "segment[%s]: size %zu, reserved %zu", + MappingName(mapping->shmem_segment), mapping->shmem_size, + mapping->shmem_reserved); #ifndef MAP_HUGETLB - /* PGSharedMemoryCreate should have dealt with this case */ - Assert(huge_pages != HUGE_PAGES_ON); + /* PrepareHugePages should have dealt with this case */ + Assert(huge_pages != HUGE_PAGES_ON && !huge_pages_on); #else - if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY) + if (huge_pages_on) { - /* - * Round up the request size to a suitable large value. 
- */ Size hugepagesize; - int mmap_flags; - GetHugePageSize(&hugepagesize, &mmap_flags); + /* Make sure nothing is messed up */ + Assert(huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY); + + /* Round up the request size to a suitable large value */ + GetHugePageSize(&hugepagesize, &mmap_flags, &memfd_flags); if (allocsize % hugepagesize != 0) allocsize += hugepagesize - (allocsize % hugepagesize); - ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE, - PG_MMAP_FLAGS | mmap_flags, -1, 0); - mmap_errno = errno; - if (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED) - { - DebugMappings(); - elog(DEBUG1, "segment[%s]: mmap(%zu) with MAP_HUGETLB failed, huge pages disabled: %m", - MappingName(mapping->shmem_segment), allocsize); - } + /* + * The reserved space is multiple of BLCKSZ. We know the huge page + * size, round up the reserved space to it. + */ + mapping->shmem_reserved = mapping->shmem_reserved + hugepagesize - + (mapping->shmem_reserved % hugepagesize); + + /* Verify that the new size is withing the reserved boundaries */ + if (mapping->shmem_reserved < mapping->shmem_size) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("not enough shared memory is reserved"), + errhint("You may need to increase \"max_available_memory\"."))); + + mmap_flags = PG_MMAP_FLAGS | mmap_flags; } #endif /* - * Report whether huge pages are in use. This needs to be tracked before - * the second mmap() call if attempting to use huge pages failed - * previously. + * Prepare an anonymous file backing the segment. Its size will be + * specified later via ftruncate. + * + * The file behaves like a regular file, but lives in memory. Once all + * references to the file are dropped, it is automatically released. + * Anonymous memory is used for all backing pages of the file, thus it has + * the same semantics as anonymous memory allocations using mmap with the + * MAP_ANONYMOUS flag. */ - SetConfigOption("huge_pages_status", (ptr == MAP_FAILED) ? 
"off" : "on", - PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT); + mapping->segment_fd = memfd_create(MappingName(mapping->shmem_segment), + memfd_flags); - if (ptr == MAP_FAILED && huge_pages != HUGE_PAGES_ON) + /* + * Specify the segment file size using allocsize, which contains + * potentially modified value. + */ + if(ftruncate(mapping->segment_fd, allocsize) == -1) { - /* - * Use the original size, not the rounded-up value, when falling back - * to non-huge pages. - */ - allocsize = mapping->shmem_size; - ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE, - PG_MMAP_FLAGS, -1, 0); - mmap_errno = errno; - } + save_errno = errno; - if (ptr == MAP_FAILED) - { - errno = mmap_errno; DebugMappings(); + close(mapping->segment_fd); + + errno = save_errno; ereport(FATAL, - (errmsg("segment[%s]: could not map anonymous shared memory: %m", + (errmsg("segment[%s]: could not truncate anonymous file: %m", MappingName(mapping->shmem_segment)), - (mmap_errno == ENOMEM) ? + (save_errno == ENOMEM) ? errhint("This error usually means that PostgreSQL's request " "for a shared memory segment exceeded available memory, " "swap space, or huge pages. To reduce the request size " @@ -700,10 +778,112 @@ CreateAnonymousSegment(AnonymousMapping *mapping) allocsize) : 0)); } + elog(DEBUG1, "segment[%s]: mmap(%zu)", + MappingName(mapping->shmem_segment), allocsize); + + /* + * Create a reservation mapping. 
+ */ + ptr = mmap(NULL, mapping->shmem_reserved, PROT_NONE, + mmap_flags | MAP_NORESERVE, mapping->segment_fd, 0); + save_errno = errno; + + if (ptr == MAP_FAILED) + { + DebugMappings(); + + errno = save_errno; + ereport(FATAL, + (errmsg("segment[%s]: could not map anonymous shared memory: %m", + MappingName(mapping->shmem_segment)))); + } + + /* Make the memory accessible */ + if(mprotect(ptr, allocsize, PROT_READ | PROT_WRITE) == -1) + { + save_errno = errno; + DebugMappings(); + + errno = save_errno; + ereport(FATAL, + (errmsg("segment[%s]: could not mprotect anonymous shared memory: %m", + MappingName(mapping->shmem_segment)))); + } + mapping->shmem = ptr; mapping->shmem_size = allocsize; } +/* + * PrepareHugePages + * + * Figure out if there are enough huge pages to allocate all shared memory + * segments, and report that information via huge_pages_status and + * huge_pages_on. It needs to be called before creating shared memory segments. + * + * It is necessary to maintain the same semantic (simple on/off) for + * huge_pages_status, even if there are multiple shared memory segments: all + * segments either use huge pages or not, there is no mix of segments with + * different page size. The latter might be actually beneficial, in particular + * because only some segments may require large amount of memory, but for now + * we go with a simple solution. 
+ */ +void +PrepareHugePages() +{ + void *ptr = MAP_FAILED; + + /* Reset to handle reinitialization */ + next_free_segment = 0; + + /* Complain if hugepages demanded but we can't possibly support them */ +#if !defined(MAP_HUGETLB) + if (huge_pages == HUGE_PAGES_ON) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("huge pages not supported on this platform"))); +#else + if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY) + { + Size hugepagesize, total_size = 0; + int mmap_flags; + + GetHugePageSize(&hugepagesize, &mmap_flags, NULL); + + /* + * Figure out how much memory is needed for all segments, keeping in + * mind that for every segment this value will be rounding up by the + * huge page size. The resulting value will be used to probe memory and + * decide whether we will allocate huge pages or not. + */ + for(int segment = 0; segment < ANON_MAPPINGS; segment++) + { + int numSemas; + Size segment_size = CalculateShmemSize(&numSemas, segment); + + if (segment_size % hugepagesize != 0) + segment_size += hugepagesize - (segment_size % hugepagesize); + + total_size += segment_size; + } + + /* Map total amount of memory to test its availability. */ + elog(DEBUG1, "reserving space: probe mmap(%zu) with MAP_HUGETLB", + total_size); + ptr = mmap(NULL, total_size, PROT_NONE, + PG_MMAP_FLAGS | MAP_ANONYMOUS | mmap_flags, -1, 0); + } +#endif + + /* + * Report whether huge pages are in use. This needs to be tracked before + * creating shared memory segments. + */ + SetConfigOption("huge_pages_status", (ptr == MAP_FAILED) ? 
"off" : "on", + PGC_INTERNAL, PGC_S_DYNAMIC_DEFAULT); + huge_pages_on = ptr != MAP_FAILED; +} + /* * AnonymousShmemDetach --- detach from an anonymous mmap'd block * (called as an on_shmem_exit callback, hence funny argument list) @@ -746,7 +926,7 @@ PGSharedMemoryCreate(Size size, void *memAddress; PGShmemHeader *hdr; struct stat statbuf; - Size sysvsize; + Size sysvsize, total_reserved; AnonymousMapping *mapping = &Mappings[next_free_segment]; /* @@ -760,14 +940,6 @@ PGSharedMemoryCreate(Size size, errmsg("could not stat data directory \"%s\": %m", DataDir))); - /* Complain if hugepages demanded but we can't possibly support them */ -#if !defined(MAP_HUGETLB) - if (huge_pages == HUGE_PAGES_ON) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("huge pages not supported on this platform"))); -#endif - /* For now, we don't support huge pages in SysV memory */ if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP) ereport(ERROR, @@ -776,8 +948,16 @@ PGSharedMemoryCreate(Size size, /* Room for a header? */ Assert(size > MAXALIGN(sizeof(PGShmemHeader))); + + /* Prepare the mapping information */ mapping->shmem_size = size; mapping->shmem_segment = next_free_segment; + total_reserved = (Size) MaxAvailableMemory * BLCKSZ; + mapping->shmem_reserved = total_reserved * SHMEM_RESIZE_RATIO[next_free_segment]; + + /* Round up to be a multiple of BLCKSZ */ + mapping->shmem_reserved = mapping->shmem_reserved + BLCKSZ - + (mapping->shmem_reserved % BLCKSZ); if (shared_memory_type == SHMEM_TYPE_MMAP) { diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c index 4dee856d6bd..732fedee87e 100644 --- a/src/backend/port/win32_shmem.c +++ b/src/backend/port/win32_shmem.c @@ -627,7 +627,7 @@ pgwin32_ReserveSharedMemoryRegion(HANDLE hChild) * use GetLargePageMinimum() instead. 
*/ void -GetHugePageSize(Size *hugepagesize, int *mmap_flags) +GetHugePageSize(Size *hugepagesize, int *mmap_flags, int *memfd_flags) { if (hugepagesize) *hugepagesize = 0; diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c index 8b38e985327..b60f7ef9ce2 100644 --- a/src/backend/storage/ipc/ipci.c +++ b/src/backend/storage/ipc/ipci.c @@ -206,6 +206,9 @@ CreateSharedMemoryAndSemaphores(void) Assert(!IsUnderPostmaster); + /* Decide if we use huge pages or regular size pages */ + PrepareHugePages(); + for(int segment = 0; segment < ANON_MAPPINGS; segment++) { /* Compute the size of the shared-memory block */ @@ -377,7 +380,7 @@ InitializeShmemGUCs(void) /* * Calculate the number of huge pages required. */ - GetHugePageSize(&hp_size, NULL); + GetHugePageSize(&hp_size, NULL, NULL); if (hp_size != 0) { Size hp_required; diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 72255a1c5ca..8d025f0e907 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -817,7 +817,7 @@ pg_get_shmem_pagesize(void) Assert(huge_pages_status != HUGE_PAGES_UNKNOWN); if (huge_pages_status == HUGE_PAGES_ON) - GetHugePageSize(&os_page_size, NULL); + GetHugePageSize(&os_page_size, NULL, NULL); return os_page_size; } diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c index d31cb45a058..90d3feb547c 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -140,6 +140,7 @@ int max_parallel_maintenance_workers = 2; * register background workers. 
*/ int NBuffers = 16384; +int MaxAvailableMemory = 524288; int MaxConnections = 100; int max_worker_processes = 8; int max_parallel_workers = 8; diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index f04bfedb2fd..a221e446d6a 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -2376,6 +2376,20 @@ struct config_int ConfigureNamesInt[] = NULL, NULL, NULL }, + { + {"max_available_memory", PGC_SIGHUP, RESOURCES_MEM, + gettext_noop("Sets the upper limit for the shared_buffers value."), + gettext_noop("Shared memory could be resized at runtime, this " + "parameter sets the upper limit for it, beyond which " + "resizing would not be supported. Normally this value " + "would be the same as the total available memory."), + GUC_UNIT_BLOCKS + }, + &MaxAvailableMemory, + 524288, 16, INT_MAX / 2, + NULL, NULL, NULL + }, + { {"vacuum_buffer_usage_limit", PGC_USERSET, RESOURCES_MEM, gettext_noop("Sets the buffer pool size for VACUUM, ANALYZE, and autovacuum."), diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 1bef98471c3..a0c37a7749e 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -173,6 +173,7 @@ extern PGDLLIMPORT char *DataDir; extern PGDLLIMPORT int data_directory_mode; extern PGDLLIMPORT int NBuffers; +extern PGDLLIMPORT int MaxAvailableMemory; extern PGDLLIMPORT int MaxBackends; extern PGDLLIMPORT int MaxConnections; extern PGDLLIMPORT int max_worker_processes; diff --git a/src/include/portability/mem.h b/src/include/portability/mem.h index ef9800732d9..40588ff6968 100644 --- a/src/include/portability/mem.h +++ b/src/include/portability/mem.h @@ -38,7 +38,7 @@ #define MAP_NOSYNC 0 #endif -#define PG_MMAP_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE) +#define PG_MMAP_FLAGS (MAP_SHARED|MAP_HASSEMAPHORE) /* Some really old systems don't define MAP_FAILED. 
*/ #ifndef MAP_FAILED diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h index 2348c59b5a0..79b0b1ef9eb 100644 --- a/src/include/storage/pg_shmem.h +++ b/src/include/storage/pg_shmem.h @@ -61,6 +61,7 @@ extern PGDLLIMPORT int shared_memory_type; extern PGDLLIMPORT int huge_pages; extern PGDLLIMPORT int huge_page_size; extern PGDLLIMPORT int huge_pages_status; +extern PGDLLIMPORT int MaxAvailableMemory; /* Possible values for huge_pages and huge_pages_status */ typedef enum @@ -104,7 +105,9 @@ extern PGShmemHeader *PGSharedMemoryCreate(Size size, PGShmemHeader **shim); extern bool PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2); extern void PGSharedMemoryDetach(void); -extern void GetHugePageSize(Size *hugepagesize, int *mmap_flags); +extern void GetHugePageSize(Size *hugepagesize, int *mmap_flags, + int *memfd_flags); +extern void PrepareHugePages(void); /* The main segment, contains everything except buffer blocks and related data. */ #define MAIN_SHMEM_SEGMENT 0 -- 2.49.0