Path: blob/main/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases [From, To) range of pages back to OS. Note that `From` and `To`
  // are offsets from `Base` + Offset.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given
  // range after Base will not be released.
  uptr Offset = 0;
};

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases [From, To) range of pages back to OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to combine (Base +
  // Offset) as a new Base. However, the Base is retrieved from
  // `MapPlatformData` on Fuchsia, which is not aware of the offset.
  // Therefore, store them separately so that it works on all the platforms.
  uptr Base = 0;
  // The release offset from Base. This is used when we know that a given
  // range after Base will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

class FragmentationRecorder {
public:
  FragmentationRecorder() = default;

  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }

  void releasePageRangeToOS(uptr From, uptr To) {
    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
    ReleasedPagesCount += (To - From) / getPageSizeCached();
  }

private:
  uptr ReleasedPagesCount = 0;
};
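
// Illustrative sketch (not part of the upstream header): all three recorders
// above expose the same releasePageRangeToOS() interface, so the release
// logic further below can be run in "dry-run" mode against a
// FragmentationRecorder to measure how many pages would be reclaimed without
// touching the OS. The function name is hypothetical.
inline uptr countReleasablePagesExample() {
  FragmentationRecorder Recorder;
  const uptr PageSize = getPageSizeCached();
  // Pretend the ranges [0, 2 * PageSize) and [5 * PageSize, 6 * PageSize)
  // were found releasable; both bounds must be page-aligned.
  Recorder.releasePageRangeToOS(0, 2 * PageSize);
  Recorder.releasePageRangeToOS(5 * PageSize, 6 * PageSize);
  return Recorder.getReleasedPagesCount(); // == 3
}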

// A buffer pool which holds a fixed number of static buffers of `uptr`
// elements for fast buffer allocation. If the request size is greater than
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do zero-check while
  // extracting the least significant bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
                          SCUDO_CACHE_LINE_SIZE),
                "");

  struct Buffer {
    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
    uptr *Data = nullptr;

    // The index of the underlying static buffer, or StaticBufferCount if this
    // buffer was dynamically allocated. This value is initially set to a
    // poison value to aid debugging.
    uptr BufferIndex = ~static_cast<uptr>(0);

    // Only valid if BufferIndex == StaticBufferCount.
    MemMapT MemMap = {};
  };

  // Return a zero-initialized buffer which can contain at least the given
  // number of elements, or a Buffer with null `Data` on failure.
  Buffer getBuffer(const uptr NumElements) {
    if (UNLIKELY(NumElements > StaticBufferNumElements))
      return getDynamicBuffer(NumElements);

    uptr index;
    {
      // TODO: In general, we expect this operation to be fast so the waiting
      // thread won't be put to sleep. The HybridMutex does implement
      // busy-waiting, but we may want to review the performance and see if
      // we need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(NumElements);

    Buffer Buf;
    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
    Buf.BufferIndex = index;
    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
    return Buf;
  }

  void releaseBuffer(Buffer Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    if (Buf.BufferIndex != StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
    } else {
      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
    }
  }

  bool isStaticBufferTestOnly(const Buffer &Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    return Buf.BufferIndex != StaticBufferCount;
  }

private:
  Buffer getDynamicBuffer(const uptr NumElements) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
    // where page fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia. It hasn't proven
    // a performance benefit on other platforms.
    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    const uptr MappedSize =
        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
    Buffer Buf;
    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
      Buf.BufferIndex = StaticBufferCount;
    }
    return Buf;
  }

  HybridMutex Mutex;
  // '1' means that the buffer index is not used. '0' means the buffer is in
  // use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};
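
// Illustrative sketch (not part of the upstream header): typical BufferPool
// usage. A request that fits in StaticBufferNumElements is served from the
// static storage when a slot is free; a larger request, or any request while
// all slots are taken, falls back to map(). The function name and the
// template arguments are hypothetical.
inline void bufferPoolUsageExample() {
  static BufferPool<2U, 512U> Pool;
  BufferPool<2U, 512U>::Buffer Buf = Pool.getBuffer(/*NumElements=*/128U);
  if (Buf.Data == nullptr)
    return; // The dynamic fallback failed; nothing to release.
  Buf.Data[0] = 42U; // The buffer is returned zero-initialized.
  // Buffers must always be handed back, whether static or dynamic.
  Pool.releaseBuffer(Buf);
}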

// A Region page map is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits,
// enough to store the counter's MaxValue. The ctor will try to use a static
// buffer first, and if that fails (the buffer is too small or already
// locked), will allocate the required Buffer via map(). The caller is
// expected to check whether the initialization was successful by checking
// the isAllocated() result. For the sake of performance, none of the
// accessors check the validity of the arguments. It is assumed that Index is
// always in the [0, N) range and the value is not incremented past MaxValue.
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
        BufferNumElements(0) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer);
    Buffer = {};
  }

  // Lock of `StaticBuffer` is acquired conditionally and there's no easy way
  // to specify the thread-safety attribute properly in the current code
  // structure. Besides, it's the only place we may want to check thread
  // safety. Therefore, it's fine to bypass the thread-safety analysis now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts to calculate a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferNumElements = SizePerRegion * Regions;
    Buffer = Buffers.getBuffer(BufferNumElements);
  }

  bool isAllocated() const { return Buffer.Data != nullptr; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
           CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                                   << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the max number of blocks in a
  // page may vary. To provide an easier way to tell if all the blocks are
  // counted for different pages, set to the same max value to denote the
  // all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferNumElements() const { return BufferNumElements; }

private:
  // We may consider making this configurable if there are cases which may
  // benefit from this.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferNumElements = 512U;
  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
  static BufferPoolT Buffers;

  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferNumElements;
  BufferPoolT::Buffer Buffer;
};
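
// Illustrative sketch (not part of the upstream header): how the packed
// counter math in reset() works out. With MaxValue = 5, a counter needs 3
// bits, which reset() rounds up to the next power of two: 4 bits. A 64-bit
// word then packs 16 counters, so 100 counters per region occupy
// roundUp(100, 16) / 16 = 7 words. The function name is hypothetical.
inline void regionPageMapUsageExample() {
  RegionPageMap PageMap(/*NumberOfRegions=*/1U, /*CountersPerRegion=*/100U,
                        /*MaxValue=*/5U);
  if (!PageMap.isAllocated())
    return; // Both static buffers were busy and map() failed.
  PageMap.incN(/*Region=*/0U, /*I=*/0U, /*N=*/3U);
  // get(0, 0) == 3 now. setAsAllCounted() would store CounterMask (0xf),
  // the sentinel value shared by all "fully counted" pages.
}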

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
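
// Illustrative sketch (not part of the upstream header): the tracker
// coalesces consecutive releasable pages into single releasePageRangeToOS()
// calls. Feeding it the pattern below produces two ranges, [P, 3P) and
// [4P, 5P), where P is the page size. The function name is hypothetical.
template <class ReleaseRecorderT>
void rangeTrackerUsageExample(ReleaseRecorderT &Recorder) {
  FreePagesRangeTracker<ReleaseRecorderT> Tracker(Recorder);
  const bool Releasable[] = {false, true, true, false, true};
  for (const bool R : Releasable)
    Tracker.processNextPage(R);
  // finish() flushes the still-open trailing range [4P, 5P).
  Tracker.finish();
}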

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no cross overs.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no cross overs.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks are crossing page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }

    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }
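
  // Worked example (illustrative, not part of the upstream header): with
  // PageSize = 4096 and BlockSize = 48, PageSize % BlockSize == 16 and
  // 48 % 16 == 0, so every page holds the same number of blocks:
  // FullPagesBlockCountMax = 4096 / 48 + 1 = 86 (one of them partial) and
  // SameBlockCountPerPage is true. With BlockSize = 96, 4096 % 96 == 64 and
  // 96 % 64 == 32 != 0, so the count varies per page: FullPagesBlockCountMax
  // = 4096 / 96 + 2 = 44 and SameBlockCountPerPage is false.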

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting
  // all the blocks, we will just mark the pages as all counted. Note that
  // `From` and `To` have to be page aligned, with one exception: if `To` is
  // equal to RegionSize, it doesn't have to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // A single straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not sit at the first page in the range; move
    // `FromInRegion` to the page containing the first block.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, there is a
    // block sitting across `From`, which looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     | ...
    //  +-----+-----+-----+-----+
    //     |- first page -||- second page -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead,
    // we increment the number of blocks in the first page in the page map
    // and then round `From` up to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because the range may contain only one block.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below looks like,
      //
      //   From                                      To
      //     V                                        V
      //     +----------------------------------------+
      //                          +-----+-----+-----+-----+
      //                          |     |     |     |     | ...
      //                          +-----+-----+-----+-----+
      //                    ... -||- last page -||- next page -|
      //
      // The last block is not aligned to `To`; we need to increment the
      // counter of the `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark
    // any page in between the range [From, To).
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }
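
  // Worked example for markRangeAsAllCounted() (illustrative, not part of
  // the upstream header): take PageSize = 4096, BlockSize = 4000, Base = 0,
  // From = 4096, To = 20480 and RegionSize = 40960. The first block in the
  // range starts at roundUpSlow(4096, 4000) = 8000, so the first page
  // [4096, 8192) only gets incN'd by 1 (for the block starting at 8000).
  // The last block in the range, [20000, 24000), straddles `To`, so the page
  // holding its tail is incremented by 1 rather than marked. Only the
  // fully-covered pages [8192, 20480) are flagged as all counted.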

  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark
      // the following "pretend" memory block(s) as free in advance.
      //
      //     Region Boundary
      //                v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks -|
      //      ^
      //   last block
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to mark the
      // trailing blocks. Note that if the last "pretend" block straddles the
      // boundary, we still have to count it in so that the logic of counting
      // the number of blocks on a page is consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }

  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, the pages before `ReleasePageOffset` do not
  // need to be counted.
  uptr ReleasePageOffset;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't contain any in-use block, i.e., all
// the blocks in those pages are free. The `PageMap` records the number of
// free blocks in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks
    // affect each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, middle portion size (the portion of the page covered by chunks
    // except the first and the last one) and then the last chunk size,
    // adding up the number of chunks on the current page and checking on
    // every step whether the page boundary was crossed.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset * PageSize;
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}
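
// Illustrative end-to-end sketch (not part of the upstream header) of how a
// caller drives a release: build a PageReleaseContext, mark the free blocks,
// then walk the page map. `FreeList` and `Decompact` stand in for the
// caller's data; the function name is hypothetical. A FragmentationRecorder
// makes this a dry run; substitute a ReleaseRecorder to actually return the
// pages to the OS.
template <class TransferBatchT, typename DecompactPtrT, typename SkipRegionT>
uptr releaseFlowExample(const IntrusiveList<TransferBatchT> &FreeList,
                        DecompactPtrT Decompact, uptr Base, uptr BlockSize,
                        uptr RegionSize, SkipRegionT SkipRegion) {
  PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                             /*ReleaseSize=*/RegionSize);
  if (!Context.markFreeBlocksInRegion(FreeList, Decompact, Base,
                                      /*RegionIndex=*/0U, RegionSize,
                                      /*MayContainLastBlockInRegion=*/true))
    return 0; // PageMap allocation failed; nothing was marked.
  FragmentationRecorder Recorder;
  releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
  return Recorder.getReleasedPagesCount();
}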

} // namespace scudo

#endif // SCUDO_RELEASE_H_