From f7e100a9b9cfd9b3dc06a723d669d99025656b6b Mon Sep 17 00:00:00 2001
From: Nathaniel Wesley Filardo
Date: Mon, 30 May 2022 23:25:44 +0100
Subject: [PATCH] RFC, NFC: refactor ranges to be nested templates

This way, we don't have to specify a Parent when we're just interested in
Pipe-ing things together.

We could have called these inner classes Apply and left the Pipe
implementation alone, but it's probably better to call them Type and adjust
the Pipe code.

---
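The shape of the change, reduced to a minimal sketch (identifiers are
abbreviated here; the real classes are in the diff below):

    // Before: the parent is a trailing template parameter, and Pipe relies
    // on the nested Apply alias to substitute it:
    template<typename PAL, typename ParentRange = EmptyRange>
    class CommitRange : public ContainsParent<ParentRange>
    {
      template<typename ParentRange2>
      using Apply = CommitRange<PAL, ParentRange2>;
      // ... alloc_range / dealloc_range ...
    };

    // After: the range-specific parameters stay on an outer struct, and the
    // parent is supplied to a nested class template, so a stage can be
    // named without committing to a parent:
    template<typename PAL>
    struct CommitRange
    {
      template<typename ParentRange = EmptyRange>
      class Type : public ContainsParent<ParentRange>
      {
        // ... alloc_range / dealloc_range ...
      };
    };

Pipe<Base, S1, S2> then folds left-to-right to S2::Type<S1::Type<Base>>.
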
 src/snmalloc/backend/backend.h                |  21 +-
 src/snmalloc/backend_helpers/commitrange.h    |  48 +--
 src/snmalloc/backend_helpers/globalrange.h    |  53 ++-
 .../backend_helpers/largebuddyrange.h         | 314 +++++++++---------
 src/snmalloc/backend_helpers/logrange.h       |  64 ++--
 .../backend_helpers/pagemapregisterrange.h    |  49 ++-
 src/snmalloc/backend_helpers/range_helpers.h  |   2 +-
 .../backend_helpers/smallbuddyrange.h         | 139 ++++----
 src/snmalloc/backend_helpers/statsrange.h     |  75 ++---
 src/snmalloc/backend_helpers/subrange.h       |  62 ++--
 10 files changed, 401 insertions(+), 426 deletions(-)

diff --git a/src/snmalloc/backend/backend.h b/src/snmalloc/backend/backend.h
index 5486e42aa..8eaf69f26 100644
--- a/src/snmalloc/backend/backend.h
+++ b/src/snmalloc/backend/backend.h
@@ -122,7 +122,7 @@ namespace snmalloc
       Base,
       LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
       LogRange<2>,
-      GlobalRange<>>;
+      GlobalRange>;
 
 #ifdef SNMALLOC_META_PROTECTED
     // Introduce two global ranges, so we don't mix Object and Meta
@@ -130,44 +130,43 @@ namespace snmalloc
       GlobalR,
       LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
       LogRange<3>,
-      GlobalRange<>>;
+      GlobalRange>;
 
     using CentralMetaRange = Pipe<
       GlobalR,
       SubRange<PAL, 6>, // Use SubRange to introduce guard pages.
       LargeBuddyRange<24, bits::BITS - 1, Pagemap, MinBaseSizeBits()>,
       LogRange<4>,
-      GlobalRange<>>;
+      GlobalRange>;
 
     // Source for object allocations
-    using StatsObject =
-      Pipe<CentralObjectRange, CommitRange<PAL>, StatsRange<>>;
+    using StatsObject = Pipe<CentralObjectRange, CommitRange<PAL>, StatsRange>;
 
     using ObjectRange =
       Pipe<StatsObject, LargeBuddyRange<21, 21, Pagemap>, LogRange<5>>;
 
-    using StatsMeta = Pipe<CentralMetaRange, CommitRange<PAL>, StatsRange<>>;
+    using StatsMeta = Pipe<CentralMetaRange, CommitRange<PAL>, StatsRange>;
 
     using MetaRange = Pipe<
       StatsMeta,
       LargeBuddyRange<21 - 6, bits::BITS - 1, Pagemap>,
-      SmallBuddyRange<>>;
+      SmallBuddyRange>;
 
     // Create global range that can service small meta-data requests.
     // Don't want to add this to the CentralMetaRange to move Commit outside the
     // lock on the common case.
-    using GlobalMetaRange = Pipe<MetaRange, SmallBuddyRange<>, GlobalRange<>>;
+    using GlobalMetaRange = Pipe<MetaRange, SmallBuddyRange, GlobalRange>;
 
     using Stats = StatsCombiner<StatsObject, StatsMeta>;
 #else
     // Source for object allocations and metadata
     // No separation between the two
-    using Stats = Pipe<GlobalR, StatsRange<>>;
+    using Stats = Pipe<GlobalR, StatsRange>;
     using ObjectRange = Pipe<
       Stats,
       CommitRange<PAL>,
       LargeBuddyRange<21, 21, Pagemap>,
-      SmallBuddyRange<>>;
-    using GlobalMetaRange = Pipe<ObjectRange, GlobalRange<>>;
+      SmallBuddyRange>;
+    using GlobalMetaRange = Pipe<ObjectRange, GlobalRange>;
 #endif
 
     struct LocalState
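For concreteness, these aliases now compose as a left fold: each stage's
nested Type is instantiated with the stage to its left, so (argument lists
elided)

    Pipe<Base, LargeBuddyRange<...>, LogRange<2>, GlobalRange>
    // ==> GlobalRange::Type<LogRange<2>::Type<LargeBuddyRange<...>::Type<Base>>>

which is why GlobalRange, StatsRange and SmallBuddyRange no longer carry a
trailing <> at their use sites.
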
diff --git a/src/snmalloc/backend_helpers/commitrange.h b/src/snmalloc/backend_helpers/commitrange.h
index 938c23846..8e3c4d3b6 100644
--- a/src/snmalloc/backend_helpers/commitrange.h
+++ b/src/snmalloc/backend_helpers/commitrange.h
@@ -5,36 +5,36 @@
 
 namespace snmalloc
 {
-  template<SNMALLOC_CONCEPT(ConceptPAL) PAL, typename ParentRange = EmptyRange>
-  class CommitRange : public ContainsParent<ParentRange>
+  template<SNMALLOC_CONCEPT(ConceptPAL) PAL>
+  struct CommitRange
   {
-    using ContainsParent<ParentRange>::parent;
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;
 
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = CommitRange<PAL, ParentRange2>;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
 
-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      constexpr Type() = default;
 
-    constexpr CommitRange() = default;
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        SNMALLOC_ASSERT(size >= OS_PAGE_SIZE);
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      auto range = parent.alloc_range(size);
-      if (range != nullptr)
-        PAL::template notify_using<NoZero>(range.unsafe_ptr(), size);
-      return range;
-    }
+        auto range = parent.alloc_range(size);
+        if (range != nullptr)
+          PAL::template notify_using<NoZero>(range.unsafe_ptr(), size);
+        return range;
+      }
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      PAL::notify_not_using(base.unsafe_ptr(), size);
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        PAL::notify_not_using(base.unsafe_ptr(), size);
+        parent.dealloc_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/globalrange.h b/src/snmalloc/backend_helpers/globalrange.h
index 80f1bca15..3e01c3d09 100644
--- a/src/snmalloc/backend_helpers/globalrange.h
+++ b/src/snmalloc/backend_helpers/globalrange.h
@@ -9,40 +9,37 @@ namespace snmalloc
    * Makes the supplied ParentRange into a global variable,
    * and protects access with a lock.
    */
-  template<typename ParentRange = EmptyRange>
-  class GlobalRange : public StaticParent<ParentRange>
+  struct GlobalRange
   {
-    using StaticParent<ParentRange>::parent;
-
-    /**
-     * This is infrequently used code, a spin lock simplifies the code
-     * considerably, and should never be on the fast path.
-     */
-    SNMALLOC_REQUIRE_CONSTINIT static inline FlagWord spin_lock{};
+    template<typename ParentRange = EmptyRange>
+    class Type : public StaticParent<ParentRange>
+    {
+      using StaticParent<ParentRange>::parent;
 
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = GlobalRange<ParentRange2>;
+      /**
+       * This is infrequently used code, a spin lock simplifies the code
+       * considerably, and should never be on the fast path.
+       */
+      SNMALLOC_REQUIRE_CONSTINIT static inline FlagWord spin_lock{};
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool ConcurrencySafe = true;
+      static constexpr bool ConcurrencySafe = true;
 
-    constexpr GlobalRange() = default;
+      constexpr Type() = default;
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      FlagLock lock(spin_lock);
-      return parent.alloc_range(size);
-    }
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        FlagLock lock(spin_lock);
+        return parent.alloc_range(size);
+      }
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      FlagLock lock(spin_lock);
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        FlagLock lock(spin_lock);
+        parent.dealloc_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
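Since Type<ParentRange> holds its parent through StaticParent and the flag
word is static inline, all instances of a given instantiation share one
parent and one lock, so the instantiation behaves like a single global
resource. A hypothetical instantiation (SomeParent is illustrative, not a
class from the tree):

    using G = GlobalRange::Type<SomeParent>;
    // Every G object serialises alloc_range/dealloc_range on the same spin
    // lock before delegating to the one shared parent, which is why
    // G::ConcurrencySafe is true even when SomeParent itself is not.
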
diff --git a/src/snmalloc/backend_helpers/largebuddyrange.h b/src/snmalloc/backend_helpers/largebuddyrange.h
index 47e359620..707c92397 100644
--- a/src/snmalloc/backend_helpers/largebuddyrange.h
+++ b/src/snmalloc/backend_helpers/largebuddyrange.h
@@ -187,203 +187,195 @@ namespace snmalloc
     size_t REFILL_SIZE_BITS,
     size_t MAX_SIZE_BITS,
     SNMALLOC_CONCEPT(ConceptBuddyRangeMeta) Pagemap,
-    size_t MIN_REFILL_SIZE_BITS = 0,
-    typename ParentRange = EmptyRange>
-  class LargeBuddyRange : public ContainsParent<ParentRange>
+    size_t MIN_REFILL_SIZE_BITS = 0>
+  struct LargeBuddyRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-    /**
-     * Maximum size of a refill
-     */
-    static constexpr size_t REFILL_SIZE = bits::one_at_bit(REFILL_SIZE_BITS);
-
-    /**
-     * Minimum size of a refill
-     */
-    static constexpr size_t MIN_REFILL_SIZE =
-      bits::one_at_bit(MIN_REFILL_SIZE_BITS);
-
-    /**
-     * The size of memory requested so far.
-     *
-     * This is used to determine the refill size.
-     */
-    size_t requested_total = 0;
-
-    /**
-     * Buddy allocator used to represent this range of memory.
-     */
-    Buddy<BuddyChunkRep<Pagemap>, MIN_CHUNK_BITS, MAX_SIZE_BITS> buddy_large;
-
-    /**
-     * The parent might not support deallocation if this buddy allocator covers
-     * the whole range. Uses template insanity to make this work.
-     */
-    template<bool exists = MAX_SIZE_BITS != (bits::BITS - 1)>
-    std::enable_if_t<exists>
-    parent_dealloc_range(capptr::Chunk<void> base, size_t size)
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
     {
-      static_assert(
-        MAX_SIZE_BITS != (bits::BITS - 1), "Don't set SFINAE parameter");
-      parent.dealloc_range(base, size);
-    }
+      using ContainsParent<ParentRange>::parent;
+
+      /**
+       * Maximum size of a refill
+       */
+      static constexpr size_t REFILL_SIZE = bits::one_at_bit(REFILL_SIZE_BITS);
+
+      /**
+       * Minimum size of a refill
+       */
+      static constexpr size_t MIN_REFILL_SIZE =
+        bits::one_at_bit(MIN_REFILL_SIZE_BITS);
+
+      /**
+       * The size of memory requested so far.
+       *
+       * This is used to determine the refill size.
+       */
+      size_t requested_total = 0;
+
+      /**
+       * Buddy allocator used to represent this range of memory.
+       */
+      Buddy<BuddyChunkRep<Pagemap>, MIN_CHUNK_BITS, MAX_SIZE_BITS> buddy_large;
+
+      /**
+       * The parent might not support deallocation if this buddy allocator
+       * covers the whole range. Uses template insanity to make this work.
+       */
+      template<bool exists = MAX_SIZE_BITS != (bits::BITS - 1)>
+      std::enable_if_t<exists>
+      parent_dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        static_assert(
+          MAX_SIZE_BITS != (bits::BITS - 1), "Don't set SFINAE parameter");
+        parent.dealloc_range(base, size);
+      }
 
-    void dealloc_overflow(capptr::Chunk<void> overflow)
-    {
-      if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
-      {
-        if (overflow != nullptr)
-        {
-          parent.dealloc_range(overflow, bits::one_at_bit(MAX_SIZE_BITS));
-        }
-      }
-      else
-      {
-        if (overflow != nullptr)
-          abort();
-      }
-    }
+      void dealloc_overflow(capptr::Chunk<void> overflow)
+      {
+        if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
+        {
+          if (overflow != nullptr)
+          {
+            parent.dealloc_range(overflow, bits::one_at_bit(MAX_SIZE_BITS));
+          }
+        }
+        else
+        {
+          if (overflow != nullptr)
+            abort();
+        }
+      }
 
-    /**
-     * Add a range of memory to the address space.
-     * Divides blocks into power of two sizes with natural alignment
-     */
-    void add_range(capptr::Chunk<void> base, size_t length)
-    {
-      range_to_pow_2_blocks<MIN_CHUNK_BITS>(
-        base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
-          auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
-            buddy_large.add_block(base.unsafe_uintptr(), align)));
-
-          dealloc_overflow(overflow);
-        });
-    }
+      /**
+       * Add a range of memory to the address space.
+       * Divides blocks into power of two sizes with natural alignment
+       */
+      void add_range(capptr::Chunk<void> base, size_t length)
+      {
+        range_to_pow_2_blocks<MIN_CHUNK_BITS>(
+          base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
+            auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
+              buddy_large.add_block(base.unsafe_uintptr(), align)));
+
+            dealloc_overflow(overflow);
+          });
+      }
 
-    capptr::Chunk<void> refill(size_t size)
-    {
-      if (ParentRange::Aligned)
-      {
-        // Use amount currently requested to determine refill size.
-        // This will gradually increase the usage of the parent range.
-        // So small examples can grow local caches slowly, and larger
-        // examples will grow them by the refill size.
-        //
-        // The heuristic is designed to allocate the following sequence for
-        // 16KiB requests 16KiB, 16KiB, 32Kib, 64KiB, ..., REFILL_SIZE/2,
-        // REFILL_SIZE, REFILL_SIZE, ... Hence if this if they are coming from a
-        // contiguous aligned range, then they could be consolidated. This
-        // depends on the ParentRange behaviour.
-        size_t refill_size = bits::min(REFILL_SIZE, requested_total);
-        refill_size = bits::max(refill_size, MIN_REFILL_SIZE);
-        refill_size = bits::max(refill_size, size);
-        refill_size = bits::next_pow2(refill_size);
-
-        auto refill_range = parent.alloc_range(refill_size);
-        if (refill_range != nullptr)
-        {
-          requested_total += refill_size;
-          add_range(pointer_offset(refill_range, size), refill_size - size);
-        }
-        return refill_range;
-      }
+      capptr::Chunk<void> refill(size_t size)
+      {
+        if (ParentRange::Aligned)
+        {
+          // Use amount currently requested to determine refill size.
+          // This will gradually increase the usage of the parent range.
+          // So small examples can grow local caches slowly, and larger
+          // examples will grow them by the refill size.
+          //
+          // The heuristic is designed to allocate the following sequence for
+          // 16KiB requests: 16KiB, 16KiB, 32KiB, 64KiB, ..., REFILL_SIZE/2,
+          // REFILL_SIZE, REFILL_SIZE, ... Hence, if they are coming from a
+          // contiguous aligned range, they could be consolidated. This
+          // depends on the ParentRange behaviour.
+          size_t refill_size = bits::min(REFILL_SIZE, requested_total);
+          refill_size = bits::max(refill_size, MIN_REFILL_SIZE);
+          refill_size = bits::max(refill_size, size);
+          refill_size = bits::next_pow2(refill_size);
+
+          auto refill_range = parent.alloc_range(refill_size);
+          if (refill_range != nullptr)
+          {
+            requested_total += refill_size;
+            add_range(pointer_offset(refill_range, size), refill_size - size);
+          }
+          return refill_range;
+        }
 
-      // Note the unaligned parent path does not use
-      // requested_total in the heuristic for the initial size
-      // this is because the request needs to introduce alignment.
-      // Currently the unaligned variant is not used as a local cache.
-      // So the gradual growing of refill_size is not needed.
-
-      // Need to overallocate to get the alignment right.
-      bool overflow = false;
-      size_t needed_size = bits::umul(size, 2, overflow);
-      if (overflow)
-      {
-        return nullptr;
-      }
-
-      auto refill_size = bits::max(needed_size, REFILL_SIZE);
-      while (needed_size <= refill_size)
-      {
-        auto refill = parent.alloc_range(refill_size);
-
-        if (refill != nullptr)
-        {
-          requested_total += refill_size;
-          add_range(refill, refill_size);
-
-          SNMALLOC_ASSERT(refill_size < bits::one_at_bit(MAX_SIZE_BITS));
-          static_assert(
-            (REFILL_SIZE < bits::one_at_bit(MAX_SIZE_BITS)) ||
-              ParentRange::Aligned,
-            "Required to prevent overflow.");
-
-          return alloc_range(size);
-        }
-
-        refill_size >>= 1;
-      }
-
-      return nullptr;
-    }
+        // Note the unaligned parent path does not use
+        // requested_total in the heuristic for the initial size;
+        // this is because the request needs to introduce alignment.
+        // Currently the unaligned variant is not used as a local cache,
+        // so the gradual growing of refill_size is not needed.
+
+        // Need to overallocate to get the alignment right.
+        bool overflow = false;
+        size_t needed_size = bits::umul(size, 2, overflow);
+        if (overflow)
+        {
+          return nullptr;
+        }
+
+        auto refill_size = bits::max(needed_size, REFILL_SIZE);
+        while (needed_size <= refill_size)
+        {
+          auto refill = parent.alloc_range(refill_size);
+
+          if (refill != nullptr)
+          {
+            requested_total += refill_size;
+            add_range(refill, refill_size);
+
+            SNMALLOC_ASSERT(refill_size < bits::one_at_bit(MAX_SIZE_BITS));
+            static_assert(
+              (REFILL_SIZE < bits::one_at_bit(MAX_SIZE_BITS)) ||
+                ParentRange::Aligned,
+              "Required to prevent overflow.");
+
+            return alloc_range(size);
+          }
+
+          refill_size >>= 1;
+        }
+
+        return nullptr;
+      }
 
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = LargeBuddyRange<
-      REFILL_SIZE_BITS,
-      MAX_SIZE_BITS,
-      Pagemap,
-      MIN_REFILL_SIZE_BITS,
-      ParentRange2>;
+    public:
+      static constexpr bool Aligned = true;
 
-    static constexpr bool Aligned = true;
+      static constexpr bool ConcurrencySafe = false;
 
-    static constexpr bool ConcurrencySafe = false;
+      constexpr Type() = default;
 
-    constexpr LargeBuddyRange() = default;
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
+        SNMALLOC_ASSERT(bits::is_pow2(size));
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
-      SNMALLOC_ASSERT(bits::is_pow2(size));
-
-      if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
-      {
-        if (ParentRange::Aligned)
-          return parent.alloc_range(size);
-
-        return nullptr;
-      }
-
-      auto result = capptr::Chunk<void>(
-        reinterpret_cast<void*>(buddy_large.remove_block(size)));
-
-      if (result != nullptr)
-        return result;
-
-      return refill(size);
-    }
+        if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+        {
+          if (ParentRange::Aligned)
+            return parent.alloc_range(size);
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
-      SNMALLOC_ASSERT(bits::is_pow2(size));
-
-      if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
-      {
-        if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
-        {
-          parent_dealloc_range(base, size);
-          return;
-        }
-      }
+          return nullptr;
+        }
 
-      auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
-        buddy_large.add_block(base.unsafe_uintptr(), size)));
-      dealloc_overflow(overflow);
-    }
+        auto result = capptr::Chunk<void>(
+          reinterpret_cast<void*>(buddy_large.remove_block(size)));
+
+        if (result != nullptr)
+          return result;
+
+        return refill(size);
+      }
+
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
+        SNMALLOC_ASSERT(bits::is_pow2(size));
+
+        if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
+        {
+          if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+          {
+            parent_dealloc_range(base, size);
+            return;
+          }
+        }
+
+        auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
+          buddy_large.add_block(base.unsafe_uintptr(), size)));
+        dealloc_overflow(overflow);
+      }
+    };
   };
 } // namespace snmalloc
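A worked instance of the refill heuristic kept above (arithmetic only,
assuming REFILL_SIZE_BITS = 24, i.e. a 16 MiB cap, MIN_REFILL_SIZE_BITS = 0,
and a steady stream of 16 KiB requests against an aligned parent):

    // requested_total   refill_size = next_pow2(max(min(REFILL_SIZE, requested_total), size))
    //        0           16 KiB   (min() gives 0, then max() with the request size)
    //    16 KiB          16 KiB
    //    32 KiB          32 KiB
    //    64 KiB          64 KiB
    //      ...           ... doubling on each refill ...
    //  >= 16 MiB         16 MiB   (clamped to REFILL_SIZE thereafter)

Each successful refill hands back `size` bytes to the caller and adds the
remainder to the buddy allocator, so a local cache grows gradually rather
than by a fixed step.
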
diff --git a/src/snmalloc/backend_helpers/logrange.h b/src/snmalloc/backend_helpers/logrange.h
index 7c5308556..9bc1d4731 100644
--- a/src/snmalloc/backend_helpers/logrange.h
+++ b/src/snmalloc/backend_helpers/logrange.h
@@ -11,51 +11,49 @@ namespace snmalloc
    *
    * ParentRange is what the range is logging calls to.
    */
-  template<size_t RangeName, typename ParentRange = EmptyRange>
-  class LogRange : public ContainsParent<ParentRange>
+  template<size_t RangeName>
+  struct LogRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = LogRange<RangeName, ParentRange2>;
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
 
-    constexpr LogRange() = default;
+      constexpr Type() = default;
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
 #ifdef SNMALLOC_TRACING
-      message<1024>("Call alloc_range({}) on {}", size, RangeName);
+        message<1024>("Call alloc_range({}) on {}", size, RangeName);
 #endif
-      auto range = parent.alloc_range(size);
+        auto range = parent.alloc_range(size);
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "{} = alloc_range({}) in {}", range.unsafe_ptr(), size, RangeName);
+        message<1024>(
+          "{} = alloc_range({}) in {}", range.unsafe_ptr(), size, RangeName);
 #endif
-      return range;
-    }
+        return range;
+      }
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "dealloc_range({}, {}}) on {}", base.unsafe_ptr(), size, RangeName);
+        message<1024>(
+          "dealloc_range({}, {}) on {}", base.unsafe_ptr(), size, RangeName);
 #endif
-      parent.dealloc_range(base, size);
+        parent.dealloc_range(base, size);
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "Done dealloc_range({}, {}})! on {}",
-        base.unsafe_ptr(),
-        size,
-        RangeName);
+        message<1024>(
+          "Done dealloc_range({}, {})! on {}",
+          base.unsafe_ptr(),
+          size,
+          RangeName);
 #endif
-    }
+      }
+    };
   };
-} // namespace snmalloc
\ No newline at end of file
+} // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/pagemapregisterrange.h b/src/snmalloc/backend_helpers/pagemapregisterrange.h
index cd52640bc..6e420897d 100644
--- a/src/snmalloc/backend_helpers/pagemapregisterrange.h
+++ b/src/snmalloc/backend_helpers/pagemapregisterrange.h
@@ -8,40 +8,37 @@ namespace snmalloc
 {
   template<
     SNMALLOC_CONCEPT(ConceptBackendMetaRange) Pagemap,
-    bool CanConsolidate = true,
-    typename ParentRange = EmptyRange>
-  class PagemapRegisterRange : public ContainsParent<ParentRange>
+    bool CanConsolidate = true>
+  struct PagemapRegisterRange
   {
-    using ContainsParent<ParentRange>::parent;
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;
 
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = PagemapRegisterRange<Pagemap, CanConsolidate, ParentRange2>;
+    public:
+      constexpr Type() = default;
 
-    constexpr PagemapRegisterRange() = default;
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
 
-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        auto base = parent.alloc_range(size);
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      auto base = parent.alloc_range(size);
+        if (base != nullptr)
+          Pagemap::register_range(address_cast(base), size);
 
-      if (base != nullptr)
-        Pagemap::register_range(address_cast(base), size);
+        if (!CanConsolidate)
+        {
+          // Mark start of allocation in pagemap.
- auto& entry = Pagemap::get_metaentry_mut(address_cast(base)); - entry.set_boundary(); + return base; } - - return base; - } + }; }; } // namespace snmalloc diff --git a/src/snmalloc/backend_helpers/range_helpers.h b/src/snmalloc/backend_helpers/range_helpers.h index fb7e3e0b9..90ee8474a 100644 --- a/src/snmalloc/backend_helpers/range_helpers.h +++ b/src/snmalloc/backend_helpers/range_helpers.h @@ -61,7 +61,7 @@ namespace snmalloc { public: using result = - typename PipeImpl, Rest...>::result; + typename PipeImpl, Rest...>::result; }; /** diff --git a/src/snmalloc/backend_helpers/smallbuddyrange.h b/src/snmalloc/backend_helpers/smallbuddyrange.h index 780d8cdec..fc42d6f7d 100644 --- a/src/snmalloc/backend_helpers/smallbuddyrange.h +++ b/src/snmalloc/backend_helpers/smallbuddyrange.h @@ -143,100 +143,97 @@ namespace snmalloc } }; - template - class SmallBuddyRange : public ContainsParent + struct SmallBuddyRange { - using ContainsParent::parent; + template + class Type : public ContainsParent + { + using ContainsParent::parent; - static constexpr size_t MIN_BITS = - bits::next_pow2_bits_const(sizeof(FreeChunk)); + static constexpr size_t MIN_BITS = + bits::next_pow2_bits_const(sizeof(FreeChunk)); - Buddy buddy_small; + Buddy buddy_small; - /** - * Add a range of memory to the address space. - * Divides blocks into power of two sizes with natural alignment - */ - void add_range(capptr::Chunk base, size_t length) - { - range_to_pow_2_blocks( - base, length, [this](capptr::Chunk base, size_t align, bool) { - capptr::Chunk overflow = - buddy_small.add_block(base.as_reinterpret(), align) - .template as_reinterpret(); - if (overflow != nullptr) - parent.dealloc_range(overflow, bits::one_at_bit(MIN_CHUNK_BITS)); - }); - } - - capptr::Chunk refill(size_t size) - { - auto refill = parent.alloc_range(MIN_CHUNK_SIZE); + /** + * Add a range of memory to the address space. + * Divides blocks into power of two sizes with natural alignment + */ + void add_range(capptr::Chunk base, size_t length) + { + range_to_pow_2_blocks( + base, length, [this](capptr::Chunk base, size_t align, bool) { + capptr::Chunk overflow = + buddy_small.add_block(base.as_reinterpret(), align) + .template as_reinterpret(); + if (overflow != nullptr) + parent.dealloc_range(overflow, bits::one_at_bit(MIN_CHUNK_BITS)); + }); + } - if (refill != nullptr) - add_range(pointer_offset(refill, size), MIN_CHUNK_SIZE - size); + capptr::Chunk refill(size_t size) + { + auto refill = parent.alloc_range(MIN_CHUNK_SIZE); - return refill; - } + if (refill != nullptr) + add_range(pointer_offset(refill, size), MIN_CHUNK_SIZE - size); - public: - /** - * We use a nested Apply type to enable a Pipe operation. 
-     */
-    template<typename ParentRange2>
-    using Apply = SmallBuddyRange<ParentRange2>;
+    public:
+      static constexpr bool Aligned = true;
+      static_assert(ParentRange::Aligned, "ParentRange must be aligned");
 
-    static constexpr bool Aligned = true;
-    static_assert(ParentRange::Aligned, "ParentRange must be aligned");
+      static constexpr bool ConcurrencySafe = false;
 
-    static constexpr bool ConcurrencySafe = false;
+      constexpr Type() = default;
 
-    constexpr SmallBuddyRange() = default;
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        if (size >= MIN_CHUNK_SIZE)
+        {
+          return parent.alloc_range(size);
+        }
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      if (size >= MIN_CHUNK_SIZE)
-      {
-        return parent.alloc_range(size);
-      }
-
-      auto result = buddy_small.remove_block(size);
-      if (result != nullptr)
-      {
-        result->left = nullptr;
-        result->right = nullptr;
-        return result.template as_reinterpret<void>();
-      }
-      return refill(size);
-    }
+        auto result = buddy_small.remove_block(size);
+        if (result != nullptr)
+        {
+          result->left = nullptr;
+          result->right = nullptr;
+          return result.template as_reinterpret<void>();
+        }
+        return refill(size);
+      }
 
-    capptr::Chunk<void> alloc_range_with_leftover(size_t size)
-    {
-      SNMALLOC_ASSERT(size <= MIN_CHUNK_SIZE);
-
-      auto rsize = bits::next_pow2(size);
-
-      auto result = alloc_range(rsize);
-
-      if (result == nullptr)
-        return nullptr;
-
-      auto remnant = pointer_offset(result, size);
-
-      add_range(remnant, rsize - size);
-
-      return result.template as_reinterpret<void>();
-    }
+      capptr::Chunk<void> alloc_range_with_leftover(size_t size)
+      {
+        SNMALLOC_ASSERT(size <= MIN_CHUNK_SIZE);
+
+        auto rsize = bits::next_pow2(size);
+
+        auto result = alloc_range(rsize);
+
+        if (result == nullptr)
+          return nullptr;
+
+        auto remnant = pointer_offset(result, size);
+
+        add_range(remnant, rsize - size);
+
+        return result.template as_reinterpret<void>();
+      }
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      if (size >= MIN_CHUNK_SIZE)
-      {
-        parent.dealloc_range(base, size);
-        return;
-      }
-
-      add_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        if (size >= MIN_CHUNK_SIZE)
+        {
+          parent.dealloc_range(base, size);
+          return;
+        }
+
+        add_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/statsrange.h b/src/snmalloc/backend_helpers/statsrange.h
index 2f6fdbc03..f38f6e999 100644
--- a/src/snmalloc/backend_helpers/statsrange.h
+++ b/src/snmalloc/backend_helpers/statsrange.h
@@ -10,58 +10,55 @@ namespace snmalloc
   /**
    * Used to measure memory usage.
    */
-  template<typename ParentRange = EmptyRange>
-  class StatsRange : public ContainsParent<ParentRange>
+  struct StatsRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-    static inline std::atomic<size_t> current_usage{};
-    static inline std::atomic<size_t> peak_usage{};
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;
 
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = StatsRange<ParentRange2>;
+      static inline std::atomic<size_t> current_usage{};
+      static inline std::atomic<size_t> peak_usage{};
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
 
-    constexpr StatsRange() = default;
+      constexpr Type() = default;
 
-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      auto result = parent.alloc_range(size);
-      if (result != nullptr)
+      capptr::Chunk<void> alloc_range(size_t size)
       {
-        auto prev = current_usage.fetch_add(size);
-        auto curr = peak_usage.load();
-        while (curr < prev + size)
+        auto result = parent.alloc_range(size);
+        if (result != nullptr)
         {
-          if (peak_usage.compare_exchange_weak(curr, prev + size))
-            break;
+          auto prev = current_usage.fetch_add(size);
+          auto curr = peak_usage.load();
+          while (curr < prev + size)
+          {
+            if (peak_usage.compare_exchange_weak(curr, prev + size))
+              break;
+          }
         }
+        return result;
       }
-      return result;
-    }
 
-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      current_usage -= size;
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        current_usage -= size;
+        parent.dealloc_range(base, size);
+      }
 
-    size_t get_current_usage()
-    {
-      return current_usage.load();
-    }
+      size_t get_current_usage()
+      {
+        return current_usage.load();
+      }
 
-    size_t get_peak_usage()
-    {
-      return peak_usage.load();
-    }
+      size_t get_peak_usage()
+      {
+        return peak_usage.load();
+      }
+    };
   };
 
   template<typename StatsR1, typename StatsR2>
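The peak tracking kept above is the usual fetch_add-plus-CAS pattern; the
same logic in isolation, as a self-contained sketch rather than code from
the tree:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> current{0};
    std::atomic<size_t> peak{0};

    void on_alloc(size_t size)
    {
      size_t prev = current.fetch_add(size); // value before the add
      size_t curr = peak.load();
      // Raise peak to prev + size unless another thread already saw more.
      while (curr < prev + size)
      {
        if (peak.compare_exchange_weak(curr, prev + size))
          break; // on failure, curr is reloaded with the observed value
      }
    }

compare_exchange_weak updates `curr` on failure, so the loop re-tests
against the latest observed peak and terminates once peak >= prev + size.
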
diff --git a/src/snmalloc/backend_helpers/subrange.h b/src/snmalloc/backend_helpers/subrange.h
index ca4f973cf..03c782539 100644
--- a/src/snmalloc/backend_helpers/subrange.h
+++ b/src/snmalloc/backend_helpers/subrange.h
@@ -9,43 +9,41 @@ namespace snmalloc
    * 2^RATIO_BITS. Will not return the block at the start or
    * the end of the large allocation.
    */
-  template<typename PAL, size_t RATIO_BITS, typename ParentRange = EmptyRange>
-  class SubRange : public ContainsParent<ParentRange>
+  template<typename PAL, size_t RATIO_BITS>
+  struct SubRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = SubRange<PAL, RATIO_BITS, ParentRange2>;
+    template<typename ParentRange = EmptyRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;
 
-    constexpr SubRange() = default;
+    public:
+      constexpr Type() = default;
 
-    static constexpr bool Aligned = ParentRange::Aligned;
+      static constexpr bool Aligned = ParentRange::Aligned;
 
-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
 
-    capptr::Chunk<void> alloc_range(size_t sub_size)
-    {
-      SNMALLOC_ASSERT(bits::is_pow2(sub_size));
-
-      auto full_size = sub_size << RATIO_BITS;
-      auto overblock = parent.alloc_range(full_size);
-      if (overblock == nullptr)
-        return nullptr;
-
-      size_t offset_mask = full_size - sub_size;
-      // Don't use first or last block in the larger reservation
-      // Loop required to get uniform distribution.
-      size_t offset;
-      do
+      capptr::Chunk<void> alloc_range(size_t sub_size)
       {
-        offset = get_entropy64<PAL>() & offset_mask;
-      } while ((offset == 0) || (offset == offset_mask));
-
-      return pointer_offset(overblock, offset);
-    }
+        SNMALLOC_ASSERT(bits::is_pow2(sub_size));
+
+        auto full_size = sub_size << RATIO_BITS;
+        auto overblock = parent.alloc_range(full_size);
+        if (overblock == nullptr)
+          return nullptr;
+
+        size_t offset_mask = full_size - sub_size;
+        // Don't use first or last block in the larger reservation
+        // Loop required to get uniform distribution.
+        size_t offset;
+        do
+        {
+          offset = get_entropy64<PAL>() & offset_mask;
+        } while ((offset == 0) || (offset == offset_mask));
+
+        return pointer_offset(overblock, offset);
+      }
+    };
   };
 } // namespace snmalloc