Path: blob/master/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMountPoint_linux.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_linux.hpp"
#include "gc/z/zSyscall_linux.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/types.h>
#include <unistd.h>

//
// Support for building on older Linux systems
//

// memfd_create(2) flags
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC                      0x0001U
#endif
#ifndef MFD_HUGETLB
#define MFD_HUGETLB                      0x0004U
#endif

// open(2) flags
#ifndef O_CLOEXEC
#define O_CLOEXEC                        02000000
#endif
#ifndef O_TMPFILE
#define O_TMPFILE                        (020000000 | O_DIRECTORY)
#endif

// fallocate(2) flags
#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE              0x01
#endif
#ifndef FALLOC_FL_PUNCH_HOLE
#define FALLOC_FL_PUNCH_HOLE             0x02
#endif

// Filesystem types, see statfs(2)
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC                      0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC                  0x958458f6
#endif

// Filesystem names
#define ZFILESYSTEM_TMPFS                "tmpfs"
#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"

// Proc file entry for max map count
#define ZFILENAME_PROC_MAX_MAP_COUNT     "/proc/sys/vm/max_map_count"

// Sysfs file for transparent huge page on tmpfs
#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"

// Java heap filename
#define ZFILENAME_HEAP                   "java_heap"

// Preferred tmpfs mount points, ordered by priority
static const char* z_preferred_tmpfs_mountpoints[] = {
  "/dev/shm",
  "/run/shm",
  NULL
};

// Preferred hugetlbfs mount points, ordered by priority
static const char* z_preferred_hugetlbfs_mountpoints[] = {
  "/dev/hugepages",
  "/hugepages",
  NULL
};

static int z_fallocate_hugetlbfs_attempts = 3;
static bool z_fallocate_supported = true;

ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
    _fd(-1),
    _filesystem(0),
    _block_size(0),
    _available(0),
    _initialized(false) {

  // Create backing file
  _fd = create_fd(ZFILENAME_HEAP);
  if (_fd == -1) {
    return;
  }

  // Truncate backing file
  while (ftruncate(_fd, max_capacity) == -1) {
    if (errno != EINTR) {
      ZErrno err;
      log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string());
      return;
    }
  }

  // Get filesystem statistics
  struct statfs buf;
  if (fstatfs(_fd, &buf) == -1) {
    ZErrno err;
    log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
    return;
  }

  _filesystem = buf.f_type;
  _block_size = buf.f_bsize;
  _available = buf.f_bavail * _block_size;

  log_info_p(gc, init)("Heap Backing Filesystem: %s (0x" UINT64_FORMAT_X ")",
                       is_tmpfs() ? ZFILESYSTEM_TMPFS : is_hugetlbfs() ? ZFILESYSTEM_HUGETLBFS : "other", _filesystem);

  // Make sure the filesystem type matches requested large page type
  if (ZLargePages::is_transparent() && !is_tmpfs()) {
    log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
                    ZFILESYSTEM_TMPFS);
    return;
  }

  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
    log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
                    ZFILESYSTEM_TMPFS);
    return;
  }

  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
    log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
                    "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
    return;
  }

  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
    log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
                    ZFILESYSTEM_HUGETLBFS);
    return;
  }

  if (ZLargePages::is_explicit() && os::large_page_size() != ZGranuleSize) {
    log_error_p(gc)("Incompatible large page size configured " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
                    os::large_page_size(), ZGranuleSize);
    return;
  }

  // Make sure the filesystem block size is compatible
  if (ZGranuleSize % _block_size != 0) {
    log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
                    _block_size);
    return;
  }

  if (is_hugetlbfs() && _block_size != ZGranuleSize) {
    log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
                    ZFILESYSTEM_HUGETLBFS, _block_size, ZGranuleSize);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

int ZPhysicalMemoryBacking::create_mem_fd(const char* name) const {
  // Create file name
  char filename[PATH_MAX];
  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");

  // Create file
  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
  const int fd = ZSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
  if (fd == -1) {
    ZErrno err;
    log_debug_p(gc, init)("Failed to create memfd file (%s)",
                          ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
    return -1;
  }

  log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename);

  return fd;
}

int ZPhysicalMemoryBacking::create_file_fd(const char* name) const {
  const char* const filesystem = ZLargePages::is_explicit()
                                 ? ZFILESYSTEM_HUGETLBFS
                                 : ZFILESYSTEM_TMPFS;
  const char** const preferred_mountpoints = ZLargePages::is_explicit()
                                             ? z_preferred_hugetlbfs_mountpoints
                                             : z_preferred_tmpfs_mountpoints;

  // Find mountpoint
  ZMountPoint mountpoint(filesystem, preferred_mountpoints);
  if (mountpoint.get() == NULL) {
    log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
    return -1;
  }

  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
  const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
  if (fd_anon == -1) {
    ZErrno err;
    log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
                          (err == EINVAL ? "Not supported" : err.to_string()));
  } else {
    // Get inode number for anonymous file
    struct stat stat_buf;
    if (fstat(fd_anon, &stat_buf) == -1) {
      ZErrno err;
      log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
      return -1;
    }

    log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);

    return fd_anon;
  }

  log_debug_p(gc, init)("Falling back to open/unlink");

  // Create file name
  char filename[PATH_MAX];
  snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id());

  // Create file
  const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
  if (fd == -1) {
    ZErrno err;
    log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string());
    return -1;
  }

  // Unlink file
  if (unlink(filename) == -1) {
    ZErrno err;
    log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
    return -1;
  }

  log_info_p(gc, init)("Heap Backing File: %s", filename);

  return fd;
}

int ZPhysicalMemoryBacking::create_fd(const char* name) const {
  if (AllocateHeapAt == NULL) {
    // If the path is not explicitly specified, then we first try to create a memfd file
    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
    // not be supported at all (requires kernel >= 3.17), or it might not support large
    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
    // file on an accessible tmpfs or hugetlbfs mount point.
    const int fd = create_mem_fd(name);
    if (fd != -1) {
      return fd;
    }

    log_debug_p(gc)("Falling back to searching for an accessible mount point");
  }

  return create_file_fd(name);
}

bool ZPhysicalMemoryBacking::is_initialized() const {
  return _initialized;
}

void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
  // Note that the available space on a tmpfs or a hugetlbfs filesystem
  // will be zero if no size limit was specified when it was mounted.
  if (_available == 0) {
    // No size limit set, skip check
    log_info_p(gc, init)("Available space on backing filesystem: N/A");
    return;
  }

  log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);

  // Warn if the filesystem doesn't currently have enough space available to hold
  // the max heap size. The max heap size will be capped if we later hit this limit
  // when trying to expand the heap.
  if (_available < max_capacity) {
    log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
    log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
                      "(available", max_capacity / M);
    log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
                      "size could", _available / M);
    log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
  }
}

void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
  FILE* const file = fopen(filename, "r");
  if (file == NULL) {
    // Failed to open file, skip check
    log_debug_p(gc, init)("Failed to open %s", filename);
    return;
  }

  size_t actual_max_map_count = 0;
  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
  fclose(file);
  if (result != 1) {
    // Failed to read file, skip check
    log_debug_p(gc, init)("Failed to read %s", filename);
    return;
  }

  // The required max map count is impossible to calculate exactly since subsystems
  // other than ZGC are also creating memory mappings, and we have no control over that.
  // However, ZGC tends to create the most mappings and dominate the total count.
  // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
  if (actual_max_map_count < required_max_map_count) {
    log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
    log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
                      max_capacity / M, filename);
    log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
                      "with the current", required_max_map_count, actual_max_map_count);
    log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
  }
}

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
  // Warn if available space is too low
  warn_available_space(max_capacity);

  // Warn if max map count is too low
  warn_max_map_count(max_capacity);
}

bool ZPhysicalMemoryBacking::is_tmpfs() const {
  return _filesystem == TMPFS_MAGIC;
}

bool ZPhysicalMemoryBacking::is_hugetlbfs() const {
  return _filesystem == HUGETLBFS_MAGIC;
}

bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
  // If the shmem_enabled file exists and is readable then we
  // know the kernel supports transparent huge pages for tmpfs.
  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
  // On hugetlbfs, mapping a file segment will fail immediately, without
  // the need to touch the mapped pages first, if there aren't enough huge
  // pages available to back the mapping.
  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
  if (addr == MAP_FAILED) {
    // Failed
    return errno;
  }

  // Once mapped, the huge pages are only reserved. We need to touch them
  // to associate them with the file segment. Note that we can not punch
  // hole in file segments which only have reserved pages.
  if (touch) {
    char* const start = (char*)addr;
    char* const end = start + length;
    os::pretouch_memory(start, end, _block_size);
  }

  // Unmap again. From now on, the huge pages that were mapped are allocated
  // to this file. There's no risk of getting a SIGBUS when mapping and
  // touching these pages again.
  if (munmap(addr, length) == -1) {
    // Failed
    return errno;
  }

  // Success
  return 0;
}

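// Touch each page in the given mapping to make sure it can be backed by
// memory. Returns false if touching a page failed, i.e. if the page could
// not be backed and touching it would have raised a SIGBUS.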
static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
  char* const start = (char*)addr;
  char* const end = start + length;

  // Touching a mapping that can't be backed by memory will generate a
  // SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and
  // handled. On tmpfs, doing a fetch (rather than a store) is enough
  // to cause backing pages to be allocated (there's no zero-page to
  // worry about).
  for (char *p = start; p < end; p += page_size) {
    if (SafeFetch32((int*)p, -1) == -1) {
      // Failed
      return false;
    }
  }

  // Success
  return true;
}

ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
  // On tmpfs, we need to touch the mapped pages to figure out
  // if there are enough pages available to back the mapping.
  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
  if (addr == MAP_FAILED) {
    // Failed
    return errno;
  }

  // Advise mapping to use transparent huge pages
  os::realign_memory((char*)addr, length, os::large_page_size());

  // Touch the mapping (safely) to make sure it's backed by memory
  const bool backed = safe_touch_mapping(addr, length, _block_size);

  // Unmap again. If successfully touched, the backing memory will
  // be allocated to this file. There's no risk of getting a SIGBUS
  // when mapping and touching these pages again.
  if (munmap(addr, length) == -1) {
    // Failed
    return errno;
  }

  // Success
  return backed ? 0 : ENOMEM;
}

ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
  uint8_t data = 0;

  // Allocate backing memory by writing to each block
  for (size_t pos = offset; pos < offset + length; pos += _block_size) {
    if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
      // Failed
      return errno;
    }
  }

  // Success
  return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
  // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
  // since Linux 4.3. When fallocate(2) is not supported we emulate it using
  // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
  // (for tmpfs without transparent huge pages and other filesystem types).
  if (ZLargePages::is_explicit()) {
    return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */);
  } else if (ZLargePages::is_transparent()) {
    return fallocate_compat_mmap_tmpfs(offset, length);
  } else {
    return fallocate_compat_pwrite(offset, length);
  }
}

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
  const int mode = 0; // Allocate
  const int res = ZSyscall::fallocate(_fd, mode, offset, length);
  if (res == -1) {
    // Failed
    return errno;
  }

  // Success
  return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
  // Using compat mode is more efficient when allocating space on hugetlbfs.
  // Note that allocating huge pages this way will only reserve them, and not
  // associate them with segments of the file. We must guarantee that we at
  // some point touch these segments, otherwise we can not punch hole in them.
  // Also note that we need to use compat mode when using transparent huge pages,
  // since we need to use madvise(2) on the mapping before the page is allocated.
  if (z_fallocate_supported && !ZLargePages::is_enabled()) {
    const ZErrno err = fallocate_fill_hole_syscall(offset, length);
    if (!err) {
      // Success
      return 0;
    }

    if (err != ENOSYS && err != EOPNOTSUPP) {
      // Failed
      return err;
    }

    // Not supported
    log_debug_p(gc)("Falling back to fallocate() compatibility mode");
    z_fallocate_supported = false;
  }

  return fallocate_fill_hole_compat(offset, length);
}

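// Release backing memory for the given file range by punching a hole in the
// backing file with fallocate(2).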
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
  if (ZLargePages::is_explicit()) {
    // We can only punch hole in pages that have been touched. Non-touched
    // pages are only reserved, and not associated with any specific file
    // segment. We don't know which pages have been previously touched, so
    // we always touch them here to guarantee that we can punch hole.
    const ZErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
    if (err) {
      // Failed
      return err;
    }
  }

  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
  if (ZSyscall::fallocate(_fd, mode, offset, length) == -1) {
    // Failed
    return errno;
  }

  // Success
  return 0;
}

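// Split the request in two halves and fallocate each half separately. Used
// when a single large fallocate(2) call keeps getting interrupted (EINTR).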
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
  // Try first half
  const size_t offset0 = offset;
  const size_t length0 = align_up(length / 2, _block_size);
  const ZErrno err0 = fallocate(punch_hole, offset0, length0);
  if (err0) {
    return err0;
  }

  // Try second half
  const size_t offset1 = offset0 + length0;
  const size_t length1 = length - length0;
  const ZErrno err1 = fallocate(punch_hole, offset1, length1);
  if (err1) {
    return err1;
  }

  // Success
  return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
  assert(is_aligned(offset, _block_size), "Invalid offset");
  assert(is_aligned(length, _block_size), "Invalid length");

  const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
  if (err == EINTR && length > _block_size) {
    // Calling fallocate(2) with a large length can take a long time to
    // complete. When running profilers, such as VTune, this syscall will
    // be constantly interrupted by signals. Expanding the file in smaller
    // steps avoids this problem.
    return split_and_fallocate(punch_hole, offset, length);
  }

  return err;
}

bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

retry:
  const ZErrno err = fallocate(false /* punch_hole */, offset, length);
  if (err) {
    if (err == ENOSPC && !is_init_completed() && ZLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
      // If we fail to allocate during initialization, due to lack of space on
      // the hugetlbfs filesystem, then we wait and retry a few times before
      // giving up. Otherwise there is a risk that running JVMs back-to-back
      // will fail, since there is a delay between process termination and the
      // huge pages owned by that process being returned to the huge page pool
      // and made available for new allocations.
      log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string());

      // Wait and retry in one second, in the hope that huge pages will be
      // available by then.
      sleep(1);
      goto retry;
    }

    // Failed
    log_error_p(gc)("Failed to commit memory (%s)", err.to_string());
    return false;
  }

  // Success
  return true;
}

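// Translate a heap offset to a NUMA node, using the granule index modulo
// the number of configured NUMA nodes as an index into the node mapping.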
static int offset_to_node(size_t offset) {
  const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
  const size_t nindex = (offset >> ZGranuleSizeShift) % mapping->length();
  return mapping->at((int)nindex);
}

size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
  size_t committed = 0;

  // Commit one granule at a time, so that each granule
  // can be allocated from a different preferred node.
  while (committed < length) {
    const size_t granule_offset = offset + committed;

    // Setup NUMA policy to allocate memory from a preferred node
    os::Linux::numa_set_preferred(offset_to_node(granule_offset));

    if (!commit_inner(granule_offset, ZGranuleSize)) {
      // Failed
      break;
    }

    committed += ZGranuleSize;
  }

  // Restore NUMA policy
  os::Linux::numa_set_preferred(-1);

  return committed;
}

size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
  // Try to commit the whole region
  if (commit_inner(offset, length)) {
    // Success
    return length;
  }

  // Failed, try to commit as much as possible
  size_t start = offset;
  size_t end = offset + length;

  for (;;) {
    length = align_down((end - start) / 2, ZGranuleSize);
    if (length < ZGranuleSize) {
      // Done, don't commit more
      return start - offset;
    }

    if (commit_inner(start, length)) {
      // Success, try commit more
      start += length;
    } else {
      // Failed, try commit less
      end -= length;
    }
  }
}

size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
  if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using non-large pages,
    // we must explicitly interleave the memory at commit/fallocate time.
    return commit_numa_interleaved(offset, length);
  }

  return commit_default(offset, length);
}

size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  const ZErrno err = fallocate(true /* punch_hole */, offset, length);
  if (err) {
    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
    return 0;
  }

  return length;
}

void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
  const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
  if (res == MAP_FAILED) {
    ZErrno err;
    fatal("Failed to map memory (%s)", err.to_string());
  }
}

void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
  // Note that we must keep the address space reservation intact and just detach
  // the backing memory. For this reason we map a new anonymous, non-accessible
  // and non-reserved page over the mapping instead of actually unmapping.
  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    ZErrno err;
    fatal("Failed to map memory (%s)", err.to_string());
  }
}