diff --git a/src/snmalloc/backend/meta_protected_range.h b/src/snmalloc/backend/meta_protected_range.h
index 9a16f9278..46cdbc56c 100644
--- a/src/snmalloc/backend/meta_protected_range.h
+++ b/src/snmalloc/backend/meta_protected_range.h
@@ -38,7 +38,7 @@ namespace snmalloc
         Pagemap,
         MinSizeBits>,
       LogRange<2>,
-      GlobalRange<>>;
+      GlobalRange>;

     static constexpr size_t page_size_bits =
       bits::next_pow2_bits_const(PAL::page_size);
@@ -53,9 +53,9 @@ namespace snmalloc
       GlobalR,
       LargeBuddyRange,
       LogRange<3>,
-      GlobalRange<>,
+      GlobalRange,
       CommitRange<PAL>,
-      StatsRange<>>;
+      StatsRange>;

     // Controls the padding around the meta-data range.
     // The larger the padding range the more randomisation that
@@ -81,8 +81,8 @@ namespace snmalloc
         Pagemap,
         page_size_bits>,
       LogRange<4>,
-      GlobalRange<>,
-      StatsRange<>>;
+      GlobalRange,
+      StatsRange>;

     // Local caching of object range
     using ObjectRange = Pipe<
@@ -101,7 +101,7 @@ namespace snmalloc
         LocalCacheSizeBits - SubRangeRatioBits,
         bits::BITS - 1,
         Pagemap>,
-      SmallBuddyRange<>>;
+      SmallBuddyRange>;

   public:
     using Stats = StatsCombiner;
@@ -119,6 +119,6 @@ namespace snmalloc
     // Don't want to add the SmallBuddyRange to the CentralMetaRange as that
     // would require committing memory inside the main global lock.
     using GlobalMetaRange =
-      Pipe<CentralMetaRange, SmallBuddyRange<>, GlobalRange<>>;
+      Pipe<CentralMetaRange, SmallBuddyRange, GlobalRange>;
   };
-} // namespace snmalloc
\ No newline at end of file
+} // namespace snmalloc
diff --git a/src/snmalloc/backend/standard_range.h b/src/snmalloc/backend/standard_range.h
index b808f4cfe..d07c9a504 100644
--- a/src/snmalloc/backend/standard_range.h
+++ b/src/snmalloc/backend/standard_range.h
@@ -35,10 +35,10 @@ namespace snmalloc
       Pagemap,
       MinSizeBits>,
     LogRange<2>,
-    GlobalRange<>>;
+    GlobalRange>;

   // Track stats of the committed memory
-  using Stats = Pipe<GlobalR, CommitRange<PAL>, StatsRange<>>;
+  using Stats = Pipe<GlobalR, CommitRange<PAL>, StatsRange>;

 private:
   static constexpr size_t page_size_bits =
@@ -53,11 +53,11 @@ namespace snmalloc
       LocalCacheSizeBits,
       Pagemap,
       page_size_bits>,
-    SmallBuddyRange<>>;
+    SmallBuddyRange>;

 public:
   // Expose a global range for the initial allocation of meta-data.
-  using GlobalMetaRange = Pipe<Stats, GlobalRange<>>;
+  using GlobalMetaRange = Pipe<Stats, GlobalRange>;

   // Where we get user allocations from.
   ObjectRange object_range;
@@ -69,4 +69,4 @@ namespace snmalloc
     return object_range;
   }
 };
-} // namespace snmalloc
\ No newline at end of file
+} // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/commitrange.h b/src/snmalloc/backend_helpers/commitrange.h
index 11bead47d..de67a1367 100644
--- a/src/snmalloc/backend_helpers/commitrange.h
+++ b/src/snmalloc/backend_helpers/commitrange.h
@@ -5,46 +5,44 @@
 namespace snmalloc
 {
-  template<typename PAL, typename ParentRange = EmptyRange>
-  class CommitRange : public ContainsParent<ParentRange>
+  template<typename PAL>
+  struct CommitRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = CommitRange<PAL, ParentRange2>;
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

-    constexpr CommitRange() = default;
+      constexpr Type() = default;

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      SNMALLOC_ASSERT_MSG(
-        (size % PAL::page_size) == 0,
-        "size ({}) must be a multiple of page size ({})",
-        size,
-        PAL::page_size);
-      auto range = parent.alloc_range(size);
-      if (range != nullptr)
-        PAL::template notify_using<NoZero>(range.unsafe_ptr(), size);
-      return range;
-    }
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        SNMALLOC_ASSERT_MSG(
+          (size % PAL::page_size) == 0,
+          "size ({}) must be a multiple of page size ({})",
+          size,
+          PAL::page_size);
+        auto range = parent.alloc_range(size);
+        if (range != nullptr)
+          PAL::template notify_using<NoZero>(range.unsafe_ptr(), size);
+        return range;
+      }

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      SNMALLOC_ASSERT_MSG(
-        (size % PAL::page_size) == 0,
-        "size ({}) must be a multiple of page size ({})",
-        size,
-        PAL::page_size);
-      PAL::notify_not_using(base.unsafe_ptr(), size);
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        SNMALLOC_ASSERT_MSG(
+          (size % PAL::page_size) == 0,
+          "size ({}) must be a multiple of page size ({})",
+          size,
+          PAL::page_size);
+        PAL::notify_not_using(base.unsafe_ptr(), size);
+        parent.dealloc_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/globalrange.h b/src/snmalloc/backend_helpers/globalrange.h
index 80f1bca15..3e01c3d09 100644
--- a/src/snmalloc/backend_helpers/globalrange.h
+++ b/src/snmalloc/backend_helpers/globalrange.h
@@ -9,40 +9,37 @@ namespace snmalloc
    * Makes the supplied ParentRange into a global variable,
    * and protects access with a lock.
    */
-  template<typename ParentRange = EmptyRange>
-  class GlobalRange : public StaticParent<ParentRange>
+  struct GlobalRange
   {
-    using StaticParent<ParentRange>::parent;
-
-    /**
-     * This is infrequently used code, a spin lock simplifies the code
-     * considerably, and should never be on the fast path.
-     */
-    SNMALLOC_REQUIRE_CONSTINIT static inline FlagWord spin_lock{};
+    template<typename ParentRange>
+    class Type : public StaticParent<ParentRange>
+    {
+      using StaticParent<ParentRange>::parent;

-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = GlobalRange<ParentRange2>;
+      /**
+       * This is infrequently used code, a spin lock simplifies the code
+       * considerably, and should never be on the fast path.
+       */
+      SNMALLOC_REQUIRE_CONSTINIT static inline FlagWord spin_lock{};

-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool ConcurrencySafe = true;
+      static constexpr bool ConcurrencySafe = true;

-    constexpr GlobalRange() = default;
+      constexpr Type() = default;

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      FlagLock lock(spin_lock);
-      return parent.alloc_range(size);
-    }
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        FlagLock lock(spin_lock);
+        return parent.alloc_range(size);
+      }

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      FlagLock lock(spin_lock);
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        FlagLock lock(spin_lock);
+        parent.dealloc_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/largebuddyrange.h b/src/snmalloc/backend_helpers/largebuddyrange.h
index afeef29f5..7d315973f 100644
--- a/src/snmalloc/backend_helpers/largebuddyrange.h
+++ b/src/snmalloc/backend_helpers/largebuddyrange.h
@@ -187,12 +187,9 @@ namespace snmalloc
     size_t REFILL_SIZE_BITS,
     size_t MAX_SIZE_BITS,
     SNMALLOC_CONCEPT(IsWritablePagemap) Pagemap,
-    size_t MIN_REFILL_SIZE_BITS = 0,
-    typename ParentRange = EmptyRange>
-  class LargeBuddyRange : public ContainsParent<ParentRange>
+    size_t MIN_REFILL_SIZE_BITS = 0>
+  class LargeBuddyRange
   {
-    using ContainsParent<ParentRange>::parent;
-
     static_assert(
       REFILL_SIZE_BITS <= MAX_SIZE_BITS, "REFILL_SIZE_BITS > MAX_SIZE_BITS");
     static_assert(
@@ -210,186 +207,182 @@ namespace snmalloc
     static constexpr size_t MIN_REFILL_SIZE =
       bits::one_at_bit(MIN_REFILL_SIZE_BITS);

-    /**
-     * The size of memory requested so far.
-     *
-     * This is used to determine the refill size.
-     */
-    size_t requested_total = 0;
-
-    /**
-     * Buddy allocator used to represent this range of memory.
-     */
-    Buddy<BuddyChunkRep<Pagemap>, MIN_CHUNK_BITS, MAX_SIZE_BITS> buddy_large;
-
-    /**
-     * The parent might not support deallocation if this buddy allocator covers
-     * the whole range. Uses template insanity to make this work.
-     */
-    template<bool exclude = MAX_SIZE_BITS != (bits::BITS - 1)>
-    std::enable_if_t<exclude>
-    parent_dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      static_assert(
-        MAX_SIZE_BITS != (bits::BITS - 1), "Don't set SFINAE parameter");
-      parent.dealloc_range(base, size);
-    }
-
-    void dealloc_overflow(capptr::Chunk<void> overflow)
+  public:
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
     {
-      if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
+      using ContainsParent<ParentRange>::parent;
+
+      /**
+       * The size of memory requested so far.
+       *
+       * This is used to determine the refill size.
+       */
+      size_t requested_total = 0;
+
+      /**
+       * Buddy allocator used to represent this range of memory.
+       */
+      Buddy<BuddyChunkRep<Pagemap>, MIN_CHUNK_BITS, MAX_SIZE_BITS> buddy_large;
+
+      /**
+       * The parent might not support deallocation if this buddy allocator
+       * covers the whole range. Uses template insanity to make this work.
+       */
+      template<bool exclude = MAX_SIZE_BITS != (bits::BITS - 1)>
+      std::enable_if_t<exclude>
+      parent_dealloc_range(capptr::Chunk<void> base, size_t size)
       {
-        if (overflow != nullptr)
-        {
-          parent.dealloc_range(overflow, bits::one_at_bit(MAX_SIZE_BITS));
-        }
+        static_assert(
+          MAX_SIZE_BITS != (bits::BITS - 1), "Don't set SFINAE parameter");
+        parent.dealloc_range(base, size);
       }
-      else
-      {
-        if (overflow != nullptr)
-          abort();
-      }
-    }
-
-    /**
-     * Add a range of memory to the address space.
-     * Divides blocks into power of two sizes with natural alignment
-     */
-    void add_range(capptr::Chunk<void> base, size_t length)
-    {
-      range_to_pow_2_blocks<MIN_CHUNK_BITS>(
-        base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
-          auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
-            buddy_large.add_block(base.unsafe_uintptr(), align)));
-          dealloc_overflow(overflow);
-        });
-    }
-
-    capptr::Chunk<void> refill(size_t size)
-    {
-      if (ParentRange::Aligned)
+      void dealloc_overflow(capptr::Chunk<void> overflow)
       {
-        // Use amount currently requested to determine refill size.
-        // This will gradually increase the usage of the parent range.
-        // So small examples can grow local caches slowly, and larger
-        // examples will grow them by the refill size.
-        //
-        // The heuristic is designed to allocate the following sequence for
-        // 16KiB requests 16KiB, 16KiB, 32Kib, 64KiB, ..., REFILL_SIZE/2,
-        // REFILL_SIZE, REFILL_SIZE, ... Hence if this if they are coming from a
-        // contiguous aligned range, then they could be consolidated. This
-        // depends on the ParentRange behaviour.
-        size_t refill_size = bits::min(REFILL_SIZE, requested_total);
-        refill_size = bits::max(refill_size, MIN_REFILL_SIZE);
-        refill_size = bits::max(refill_size, size);
-        refill_size = bits::next_pow2(refill_size);
-
-        auto refill_range = parent.alloc_range(refill_size);
-        if (refill_range != nullptr)
+        if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
+        {
+          if (overflow != nullptr)
+          {
+            parent.dealloc_range(overflow, bits::one_at_bit(MAX_SIZE_BITS));
+          }
+        }
+        else
         {
-          requested_total += refill_size;
-          add_range(pointer_offset(refill_range, size), refill_size - size);
+          if (overflow != nullptr)
+            abort();
         }
-        return refill_range;
       }

-      // Note the unaligned parent path does not use
-      // requested_total in the heuristic for the initial size
-      // this is because the request needs to introduce alignment.
-      // Currently the unaligned variant is not used as a local cache.
-      // So the gradual growing of refill_size is not needed.
-
-      // Need to overallocate to get the alignment right.
-      bool overflow = false;
-      size_t needed_size = bits::umul(size, 2, overflow);
-      if (overflow)
+      /**
+       * Add a range of memory to the address space.
+       * Divides blocks into power of two sizes with natural alignment
+       */
+      void add_range(capptr::Chunk<void> base, size_t length)
       {
-        return nullptr;
+        range_to_pow_2_blocks<MIN_CHUNK_BITS>(
+          base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
+            auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
+              buddy_large.add_block(base.unsafe_uintptr(), align)));
+
+            dealloc_overflow(overflow);
+          });
       }

-      auto refill_size = bits::max(needed_size, REFILL_SIZE);
-      while (needed_size <= refill_size)
+      capptr::Chunk<void> refill(size_t size)
       {
-        auto refill = parent.alloc_range(refill_size);
-
-        if (refill != nullptr)
+        if (ParentRange::Aligned)
         {
-          requested_total += refill_size;
-          add_range(refill, refill_size);
+          // Use amount currently requested to determine refill size.
+          // This will gradually increase the usage of the parent range.
+          // So small examples can grow local caches slowly, and larger
+          // examples will grow them by the refill size.
+          //
+          // The heuristic is designed to allocate the following sequence for
+          // 16KiB requests 16KiB, 16KiB, 32KiB, 64KiB, ..., REFILL_SIZE/2,
+          // REFILL_SIZE, REFILL_SIZE, ... Hence if they are coming from
+          // a contiguous aligned range, then they could be consolidated. This
+          // depends on the ParentRange behaviour.
+          size_t refill_size = bits::min(REFILL_SIZE, requested_total);
+          refill_size = bits::max(refill_size, MIN_REFILL_SIZE);
+          refill_size = bits::max(refill_size, size);
+          refill_size = bits::next_pow2(refill_size);
+
+          auto refill_range = parent.alloc_range(refill_size);
+          if (refill_range != nullptr)
+          {
+            requested_total += refill_size;
+            add_range(pointer_offset(refill_range, size), refill_size - size);
+          }
+          return refill_range;
+        }

-          SNMALLOC_ASSERT(refill_size < bits::one_at_bit(MAX_SIZE_BITS));
-          static_assert(
-            (REFILL_SIZE < bits::one_at_bit(MAX_SIZE_BITS)) ||
-              ParentRange::Aligned,
-            "Required to prevent overflow.");
+        // Note the unaligned parent path does not use
+        // requested_total in the heuristic for the initial size
+        // this is because the request needs to introduce alignment.
+        // Currently the unaligned variant is not used as a local cache.
+        // So the gradual growing of refill_size is not needed.

-          return alloc_range(size);
+        // Need to overallocate to get the alignment right.
+        bool overflow = false;
+        size_t needed_size = bits::umul(size, 2, overflow);
+        if (overflow)
+        {
+          return nullptr;
         }

-        refill_size >>= 1;
-      }
+        auto refill_size = bits::max(needed_size, REFILL_SIZE);
+        while (needed_size <= refill_size)
+        {
+          auto refill = parent.alloc_range(refill_size);

-      return nullptr;
-    }
+          if (refill != nullptr)
+          {
+            requested_total += refill_size;
+            add_range(refill, refill_size);

-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = LargeBuddyRange<
-      REFILL_SIZE_BITS,
-      MAX_SIZE_BITS,
-      Pagemap,
-      MIN_REFILL_SIZE_BITS,
-      ParentRange2>;
+            SNMALLOC_ASSERT(refill_size < bits::one_at_bit(MAX_SIZE_BITS));
+            static_assert(
+              (REFILL_SIZE < bits::one_at_bit(MAX_SIZE_BITS)) ||
+                ParentRange::Aligned,
+              "Required to prevent overflow.");

-    static constexpr bool Aligned = true;
+            return alloc_range(size);
+          }

-    static constexpr bool ConcurrencySafe = false;
+          refill_size >>= 1;
+        }

-    constexpr LargeBuddyRange() = default;
+        return nullptr;
+      }

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
-      SNMALLOC_ASSERT(bits::is_pow2(size));
+    public:
+      static constexpr bool Aligned = true;
+
+      static constexpr bool ConcurrencySafe = false;

-      if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+      constexpr Type() = default;
+
+      capptr::Chunk<void> alloc_range(size_t size)
       {
-        if (ParentRange::Aligned)
-          return parent.alloc_range(size);
+        SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
+        SNMALLOC_ASSERT(bits::is_pow2(size));

-        return nullptr;
-      }
+        if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+        {
+          if (ParentRange::Aligned)
+            return parent.alloc_range(size);

-      auto result = capptr::Chunk<void>(
-        reinterpret_cast<void*>(buddy_large.remove_block(size)));
+          return nullptr;
+        }

-      if (result != nullptr)
-        return result;
+        auto result = capptr::Chunk<void>(
+          reinterpret_cast<void*>(buddy_large.remove_block(size)));

-      return refill(size);
-    }
+        if (result != nullptr)
+          return result;

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
-      SNMALLOC_ASSERT(bits::is_pow2(size));
+        return refill(size);
+      }

-      if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
       {
-        if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+        SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
+        SNMALLOC_ASSERT(bits::is_pow2(size));
+
+        if constexpr (MAX_SIZE_BITS != (bits::BITS - 1))
         {
-          parent_dealloc_range(base, size);
-          return;
+          if (size >= (bits::one_at_bit(MAX_SIZE_BITS) - 1))
+          {
+            parent_dealloc_range(base, size);
+            return;
+          }
         }
-      }

-      auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
-        buddy_large.add_block(base.unsafe_uintptr(), size)));
-      dealloc_overflow(overflow);
-    }
+        auto overflow = capptr::Chunk<void>(reinterpret_cast<void*>(
+          buddy_large.add_block(base.unsafe_uintptr(), size)));
+        dealloc_overflow(overflow);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/logrange.h b/src/snmalloc/backend_helpers/logrange.h
index 7c5308556..9bc1d4731 100644
--- a/src/snmalloc/backend_helpers/logrange.h
+++ b/src/snmalloc/backend_helpers/logrange.h
@@ -11,51 +11,49 @@ namespace snmalloc
    *
    * ParentRange is what the range is logging calls to.
    */
-  template<size_t RangeName, typename ParentRange = EmptyRange>
-  class LogRange : public ContainsParent<ParentRange>
+  template<size_t RangeName>
+  struct LogRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = LogRange<RangeName, ParentRange2>;
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

-    constexpr LogRange() = default;
+      constexpr Type() = default;

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
 #ifdef SNMALLOC_TRACING
-      message<1024>("Call alloc_range({}) on {}", size, RangeName);
+        message<1024>("Call alloc_range({}) on {}", size, RangeName);
 #endif
-      auto range = parent.alloc_range(size);
+        auto range = parent.alloc_range(size);
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "{} = alloc_range({}) in {}", range.unsafe_ptr(), size, RangeName);
+        message<1024>(
+          "{} = alloc_range({}) in {}", range.unsafe_ptr(), size, RangeName);
 #endif
-      return range;
-    }
+        return range;
+      }

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "dealloc_range({}, {}}) on {}", base.unsafe_ptr(), size, RangeName);
+        message<1024>(
+          "dealloc_range({}, {}) on {}", base.unsafe_ptr(), size, RangeName);
 #endif
-      parent.dealloc_range(base, size);
+        parent.dealloc_range(base, size);
 #ifdef SNMALLOC_TRACING
-      message<1024>(
-        "Done dealloc_range({}, {}})! on {}",
-        base.unsafe_ptr(),
-        size,
-        RangeName);
+        message<1024>(
+          "Done dealloc_range({}, {})! on {}",
+          base.unsafe_ptr(),
+          size,
+          RangeName);
 #endif
-    }
+      }
+    };
   };
-} // namespace snmalloc
\ No newline at end of file
+} // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/pagemapregisterrange.h b/src/snmalloc/backend_helpers/pagemapregisterrange.h
index d0fe39d46..7201f8ff9 100644
--- a/src/snmalloc/backend_helpers/pagemapregisterrange.h
+++ b/src/snmalloc/backend_helpers/pagemapregisterrange.h
@@ -8,40 +8,37 @@ namespace snmalloc
 {
   template<
     SNMALLOC_CONCEPT(IsWritablePagemapWithRegister) Pagemap,
-    bool CanConsolidate = true,
-    typename ParentRange = EmptyRange>
-  class PagemapRegisterRange : public ContainsParent<ParentRange>
+    bool CanConsolidate = true>
+  struct PagemapRegisterRange
   {
-    using ContainsParent<ParentRange>::parent;
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = PagemapRegisterRange<Pagemap, CanConsolidate, ParentRange2>;
+    public:
+      constexpr Type() = default;

-    constexpr PagemapRegisterRange() = default;
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool Aligned = ParentRange::Aligned;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      capptr::Chunk<void> alloc_range(size_t size)
+      {
+        auto base = parent.alloc_range(size);

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      auto base = parent.alloc_range(size);
+        if (base != nullptr)
+          Pagemap::register_range(address_cast(base), size);

-      if (base != nullptr)
-        Pagemap::register_range(address_cast(base), size);
+        if (!CanConsolidate)
+        {
+          // Mark start of allocation in pagemap.
+          auto& entry = Pagemap::get_metaentry_mut(address_cast(base));
+          entry.set_boundary();
+        }

-      if (!CanConsolidate)
-      {
-        // Mark start of allocation in pagemap.
-        auto& entry = Pagemap::get_metaentry_mut(address_cast(base));
-        entry.set_boundary();
+        return base;
       }
-
-      return base;
-    }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/range_helpers.h b/src/snmalloc/backend_helpers/range_helpers.h
index fb7e3e0b9..90ee8474a 100644
--- a/src/snmalloc/backend_helpers/range_helpers.h
+++ b/src/snmalloc/backend_helpers/range_helpers.h
@@ -61,7 +61,7 @@ namespace snmalloc
   {
   public:
     using result =
-      typename PipeImpl<typename F::template Apply<Result>, Rest...>::result;
+      typename PipeImpl<typename F::template Type<Result>, Rest...>::result;
   };

   /**
diff --git a/src/snmalloc/backend_helpers/smallbuddyrange.h b/src/snmalloc/backend_helpers/smallbuddyrange.h
index 780d8cdec..fc42d6f7d 100644
--- a/src/snmalloc/backend_helpers/smallbuddyrange.h
+++ b/src/snmalloc/backend_helpers/smallbuddyrange.h
@@ -143,100 +143,97 @@ namespace snmalloc
     }
   };

-  template<typename ParentRange = EmptyRange>
-  class SmallBuddyRange : public ContainsParent<ParentRange>
+  struct SmallBuddyRange
   {
-    using ContainsParent<ParentRange>::parent;
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-    static constexpr size_t MIN_BITS =
-      bits::next_pow2_bits_const(sizeof(FreeChunk));
+      static constexpr size_t MIN_BITS =
+        bits::next_pow2_bits_const(sizeof(FreeChunk));

-    Buddy<BuddyInplaceRep, MIN_BITS, MIN_CHUNK_BITS> buddy_small;
+      Buddy<BuddyInplaceRep, MIN_BITS, MIN_CHUNK_BITS> buddy_small;

-    /**
-     * Add a range of memory to the address space.
-     * Divides blocks into power of two sizes with natural alignment
-     */
-    void add_range(capptr::Chunk<void> base, size_t length)
-    {
-      range_to_pow_2_blocks<MIN_BITS>(
-        base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
-          capptr::Chunk<void> overflow =
-            buddy_small.add_block(base.as_reinterpret<FreeChunk>(), align)
-              .template as_reinterpret<void>();
-          if (overflow != nullptr)
-            parent.dealloc_range(overflow, bits::one_at_bit(MIN_CHUNK_BITS));
-        });
-    }
-
-    capptr::Chunk<void> refill(size_t size)
-    {
-      auto refill = parent.alloc_range(MIN_CHUNK_SIZE);
+      /**
+       * Add a range of memory to the address space.
+       * Divides blocks into power of two sizes with natural alignment
+       */
+      void add_range(capptr::Chunk<void> base, size_t length)
+      {
+        range_to_pow_2_blocks<MIN_BITS>(
+          base, length, [this](capptr::Chunk<void> base, size_t align, bool) {
+            capptr::Chunk<void> overflow =
+              buddy_small.add_block(base.as_reinterpret<FreeChunk>(), align)
+                .template as_reinterpret<void>();
+            if (overflow != nullptr)
+              parent.dealloc_range(overflow, bits::one_at_bit(MIN_CHUNK_BITS));
+          });
+      }

-      if (refill != nullptr)
-        add_range(pointer_offset(refill, size), MIN_CHUNK_SIZE - size);
+      capptr::Chunk<void> refill(size_t size)
+      {
+        auto refill = parent.alloc_range(MIN_CHUNK_SIZE);

-      return refill;
-    }
+        if (refill != nullptr)
+          add_range(pointer_offset(refill, size), MIN_CHUNK_SIZE - size);

-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = SmallBuddyRange<ParentRange2>;
+        return refill;
+      }

-    static constexpr bool Aligned = true;
-    static_assert(ParentRange::Aligned, "ParentRange must be aligned");
+    public:
+      static constexpr bool Aligned = true;
+      static_assert(ParentRange::Aligned, "ParentRange must be aligned");

-    static constexpr bool ConcurrencySafe = false;
+      static constexpr bool ConcurrencySafe = false;

-    constexpr SmallBuddyRange() = default;
+      constexpr Type() = default;

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      if (size >= MIN_CHUNK_SIZE)
+      capptr::Chunk<void> alloc_range(size_t size)
       {
-        return parent.alloc_range(size);
-      }
+        if (size >= MIN_CHUNK_SIZE)
+        {
+          return parent.alloc_range(size);
+        }

-      auto result = buddy_small.remove_block(size);
-      if (result != nullptr)
-      {
-        result->left = nullptr;
-        result->right = nullptr;
-        return result.template as_reinterpret<void>();
+        auto result = buddy_small.remove_block(size);
+        if (result != nullptr)
+        {
+          result->left = nullptr;
+          result->right = nullptr;
+          return result.template as_reinterpret<void>();
+        }
+        return refill(size);
       }
-      return refill(size);
-    }

-    capptr::Chunk<void> alloc_range_with_leftover(size_t size)
-    {
-      SNMALLOC_ASSERT(size <= MIN_CHUNK_SIZE);
+      capptr::Chunk<void> alloc_range_with_leftover(size_t size)
+      {
+        SNMALLOC_ASSERT(size <= MIN_CHUNK_SIZE);

-      auto rsize = bits::next_pow2(size);
+        auto rsize = bits::next_pow2(size);

-      auto result = alloc_range(rsize);
+        auto result = alloc_range(rsize);

-      if (result == nullptr)
-        return nullptr;
+        if (result == nullptr)
+          return nullptr;

-      auto remnant = pointer_offset(result, size);
+        auto remnant = pointer_offset(result, size);

-      add_range(remnant, rsize - size);
+        add_range(remnant, rsize - size);

-      return result.template as_reinterpret<void>();
-    }
+        return result.template as_reinterpret<void>();
+      }

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      if (size >= MIN_CHUNK_SIZE)
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
       {
-        parent.dealloc_range(base, size);
-        return;
-      }
+        if (size >= MIN_CHUNK_SIZE)
+        {
+          parent.dealloc_range(base, size);
+          return;
+        }

-      add_range(base, size);
-    }
+        add_range(base, size);
+      }
+    };
   };
 } // namespace snmalloc
diff --git a/src/snmalloc/backend_helpers/statsrange.h b/src/snmalloc/backend_helpers/statsrange.h
index 2f6fdbc03..f38f6e999 100644
--- a/src/snmalloc/backend_helpers/statsrange.h
+++ b/src/snmalloc/backend_helpers/statsrange.h
@@ -10,58 +10,55 @@ namespace snmalloc
   /**
    * Used to measure memory usage.
    */
-  template<typename ParentRange = EmptyRange>
-  class StatsRange : public ContainsParent<ParentRange>
+  struct StatsRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-    static inline std::atomic<size_t> current_usage{};
-    static inline std::atomic<size_t> peak_usage{};
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = StatsRange<ParentRange2>;
+      static inline std::atomic<size_t> current_usage{};
+      static inline std::atomic<size_t> peak_usage{};

-    static constexpr bool Aligned = ParentRange::Aligned;
+    public:
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

-    constexpr StatsRange() = default;
+      constexpr Type() = default;

-    capptr::Chunk<void> alloc_range(size_t size)
-    {
-      auto result = parent.alloc_range(size);
-      if (result != nullptr)
+      capptr::Chunk<void> alloc_range(size_t size)
       {
-        auto prev = current_usage.fetch_add(size);
-        auto curr = peak_usage.load();
-        while (curr < prev + size)
+        auto result = parent.alloc_range(size);
+        if (result != nullptr)
         {
-          if (peak_usage.compare_exchange_weak(curr, prev + size))
-            break;
+          auto prev = current_usage.fetch_add(size);
+          auto curr = peak_usage.load();
+          while (curr < prev + size)
+          {
+            if (peak_usage.compare_exchange_weak(curr, prev + size))
+              break;
+          }
         }
+        return result;
       }
-      return result;
-    }

-    void dealloc_range(capptr::Chunk<void> base, size_t size)
-    {
-      current_usage -= size;
-      parent.dealloc_range(base, size);
-    }
+      void dealloc_range(capptr::Chunk<void> base, size_t size)
+      {
+        current_usage -= size;
+        parent.dealloc_range(base, size);
+      }

-    size_t get_current_usage()
-    {
-      return current_usage.load();
-    }
+      size_t get_current_usage()
+      {
+        return current_usage.load();
+      }

-    size_t get_peak_usage()
-    {
-      return peak_usage.load();
-    }
+      size_t get_peak_usage()
+      {
+        return peak_usage.load();
+      }
+    };
   };

   template
diff --git a/src/snmalloc/backend_helpers/subrange.h b/src/snmalloc/backend_helpers/subrange.h
index ca4f973cf..03c782539 100644
--- a/src/snmalloc/backend_helpers/subrange.h
+++ b/src/snmalloc/backend_helpers/subrange.h
@@ -9,43 +9,41 @@ namespace snmalloc
    * 2^RATIO_BITS. Will not return a the block at the start or
    * the end of the large allocation.
    */
-  template<typename PAL, typename ParentRange, size_t RATIO_BITS>
-  class SubRange : public ContainsParent<ParentRange>
+  template<typename PAL, size_t RATIO_BITS>
+  struct SubRange
   {
-    using ContainsParent<ParentRange>::parent;
-
-  public:
-    /**
-     * We use a nested Apply type to enable a Pipe operation.
-     */
-    template<typename ParentRange2>
-    using Apply = SubRange<PAL, ParentRange2, RATIO_BITS>;
+    template<typename ParentRange>
+    class Type : public ContainsParent<ParentRange>
+    {
+      using ContainsParent<ParentRange>::parent;

-    constexpr SubRange() = default;
+    public:
+      constexpr Type() = default;

-    static constexpr bool Aligned = ParentRange::Aligned;
+      static constexpr bool Aligned = ParentRange::Aligned;

-    static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;
+      static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

-    capptr::Chunk<void> alloc_range(size_t sub_size)
-    {
-      SNMALLOC_ASSERT(bits::is_pow2(sub_size));
-
-      auto full_size = sub_size << RATIO_BITS;
-      auto overblock = parent.alloc_range(full_size);
-      if (overblock == nullptr)
-        return nullptr;
-
-      size_t offset_mask = full_size - sub_size;
-      // Don't use first or last block in the larger reservation
-      // Loop required to get uniform distribution.
-      size_t offset;
-      do
+      capptr::Chunk<void> alloc_range(size_t sub_size)
       {
-        offset = get_entropy64<PAL>() & offset_mask;
-      } while ((offset == 0) || (offset == offset_mask));
-
-      return pointer_offset(overblock, offset);
-    }
+        SNMALLOC_ASSERT(bits::is_pow2(sub_size));
+
+        auto full_size = sub_size << RATIO_BITS;
+        auto overblock = parent.alloc_range(full_size);
+        if (overblock == nullptr)
+          return nullptr;
+
+        size_t offset_mask = full_size - sub_size;
+        // Don't use first or last block in the larger reservation
+        // Loop required to get uniform distribution.
+        size_t offset;
+        do
+        {
+          offset = get_entropy64<PAL>() & offset_mask;
+        } while ((offset == 0) || (offset == offset_mask));
+
+        return pointer_offset(overblock, offset);
+      }
+    };
   };
 } // namespace snmalloc