Move alloc_size and check_size to globalalloc.
mjp41 committed Feb 19, 2025
1 parent ad4ed39 commit 5d4cfa4
Showing 4 changed files with 68 additions and 77 deletions.
2 changes: 1 addition & 1 deletion fuzzing/snmalloc-fuzzer.cpp
@@ -148,7 +148,7 @@ struct Result
{
auto alloc = snmalloc::get_scoped_allocator();
if (ptr)
alloc->dealloc(ptr, size);
alloc->dealloc(ptr);
ptr = nullptr;
}
};
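With this commit the scoped allocator's dealloc drops the size argument; the sized sanity check now lives in globalalloc.h. A minimal sketch of the updated call pattern, assuming the usual snmalloc/snmalloc.h include and an alloc->alloc(size) call that is not part of this diff:

#include <snmalloc/snmalloc.h>

void roundtrip(size_t size)
{
  auto alloc = snmalloc::get_scoped_allocator();
  void* ptr = alloc->alloc(size);
  if (ptr)
    alloc->dealloc(ptr); // size argument no longer required here
}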
71 changes: 62 additions & 9 deletions src/snmalloc/global/globalalloc.h
@@ -266,6 +266,59 @@ namespace snmalloc
return meta_slab->get_meta_for_object(index);
}

/**
* @brief Checks that the supplied size of the allocation matches the size
* snmalloc has recorded for it. The check is only performed when
* mitigations(sanity_checks) is enabled.
*/
SNMALLOC_FAST_PATH_INLINE void check_size(void* p, size_t size)
{
if constexpr (mitigations(sanity_checks))
{
const auto& entry = Config::Backend::get_metaentry(address_cast(p));
if (!entry.is_owned())
return;
size = size == 0 ? 1 : size;
auto sc = size_to_sizeclass_full(size);
auto pm_sc = entry.get_sizeclass();
auto rsize = sizeclass_full_to_size(sc);
auto pm_size = sizeclass_full_to_size(pm_sc);
snmalloc_check_client(
mitigations(sanity_checks),
(sc == pm_sc) || (p == nullptr),
"Dealloc rounded size mismatch: {} != {}",
rsize,
pm_size);
}
else
UNUSED(p, size);
}

SNMALLOC_FAST_PATH_INLINE size_t alloc_size(const void* p_raw)
{
const auto& entry = Config::Backend::get_metaentry(address_cast(p_raw));

if (SNMALLOC_UNLIKELY(
!SecondaryAllocator::pass_through && !entry.is_owned() &&
p_raw != nullptr))
return SecondaryAllocator::alloc_size(p_raw);
// TODO What's the domestication policy here? At the moment we just
// probe the pagemap with the raw address, without checks. There could
// be implicit domestication through the `Config::Pagemap` or
// we could just leave well enough alone.

// Note that alloc_size should return 0 for nullptr.
// Other than nullptr, we know the system will be initialised as it must
// be called with something we have already allocated.
//
// To handle this case we require the uninitialised pagemap contain an
// entry for the first chunk of memory, that states it represents a
// large object, so we can pull the check for null off the fast path.

return sizeclass_full_to_size(entry.get_sizeclass());
}

template<size_t size, ZeroMem zero_mem = NoZero, size_t align = 1>
SNMALLOC_FAST_PATH_INLINE void* alloc()
{
@@ -291,23 +344,22 @@ namespace snmalloc

SNMALLOC_FAST_PATH_INLINE void dealloc(void* p, size_t size)
{
ThreadAlloc::get().dealloc(p, size);
check_size(p, size);
ThreadAlloc::get().dealloc(p);
}

template<size_t size>
SNMALLOC_FAST_PATH_INLINE void dealloc(void* p)
{
ThreadAlloc::get().dealloc(p, size);
check_size(p, size);
ThreadAlloc::get().dealloc(p);
}

SNMALLOC_FAST_PATH_INLINE void dealloc(void* p, size_t size, size_t align)
{
ThreadAlloc::get().dealloc(p, aligned_size(size, align));
}

SNMALLOC_FAST_PATH_INLINE size_t alloc_size(const void* p)
{
return ThreadAlloc::get().alloc_size(p);
auto rsize = aligned_size(align, size);
check_size(p, rsize);
ThreadAlloc::get().dealloc(p);
}

SNMALLOC_FAST_PATH_INLINE void debug_teardown()
@@ -317,6 +369,7 @@

SNMALLOC_FAST_PATH_INLINE bool is_owned(void* p)
{
return ThreadAlloc::get().is_snmalloc_owned(p);
const auto& entry = Config::Backend::get_metaentry(address_cast(p));
return entry.is_owned();
}
} // namespace snmalloc
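With alloc_size and the sized-dealloc check now provided at namespace scope, callers query and free through the global entry points. A minimal sketch, assuming the usual snmalloc/snmalloc.h include; snmalloc::alloc, snmalloc::alloc_size, and the sized snmalloc::dealloc are the entry points shown in this diff, and the mismatch report only fires when mitigations(sanity_checks) is enabled:

#include <snmalloc/snmalloc.h>
#include <cstdio>

int main()
{
  void* p = snmalloc::alloc(48);
  // alloc_size reports the rounded size-class size, not the requested size.
  printf("usable size: %zu\n", snmalloc::alloc_size(p));
  // The sized dealloc now calls check_size first; a size that rounds to a
  // different size class trips the sanity check when
  // mitigations(sanity_checks) is enabled.
  snmalloc::dealloc(p, 48);
}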
66 changes: 2 additions & 64 deletions src/snmalloc/mem/localalloc.h
@@ -266,7 +266,7 @@ namespace snmalloc
message<1024>(
"Remote dealloc post {} ({}, {})",
p.unsafe_ptr(),
alloc_size(p.unsafe_ptr()),
sizeclass_full_to_size(entry.get_sizeclass()),
address_cast(entry.get_slab_metadata()));
#endif
local_cache.remote_dealloc_cache.template dealloc<sizeof(CoreAlloc)>(
@@ -487,7 +487,6 @@
{
if (SNMALLOC_LIKELY(entry.is_owned()))
{
RemoteAllocator* remote = entry.get_remote();
dealloc_cheri_checks(p_tame.unsafe_ptr());

// Detect double free of large allocations here.
@@ -505,7 +504,7 @@
message<1024>(
"Remote dealloc fast {} ({}, {})",
address_cast(p_tame),
alloc_size(p_tame.unsafe_ptr()),
sizeclass_full_to_size(entry.get_sizeclass()),
address_cast(entry.get_slab_metadata()));
#endif
return;
@@ -527,42 +526,6 @@
SecondaryAllocator::deallocate(p_tame.unsafe_ptr());
}

void check_size(void* p, size_t size)
{
if constexpr (mitigations(sanity_checks))
{
const auto& entry = Config::Backend::get_metaentry(address_cast(p));
if (!entry.is_owned(p))
return;
size = size == 0 ? 1 : size;
auto sc = size_to_sizeclass_full(size);
auto pm_sc = entry.get_sizeclass();
auto rsize = sizeclass_full_to_size(sc);
auto pm_size = sizeclass_full_to_size(pm_sc);
snmalloc_check_client(
mitigations(sanity_checks),
(sc == pm_sc) || (p == nullptr),
"Dealloc rounded size mismatch: {} != {}",
rsize,
pm_size);
}
else
UNUSED(p, size);
}

SNMALLOC_FAST_PATH void dealloc(void* p, size_t s)
{
check_size(p, s);
dealloc(p);
}

template<size_t size>
SNMALLOC_FAST_PATH void dealloc(void* p)
{
check_size(p, size);
dealloc(p);
}

void teardown()
{
#ifdef SNMALLOC_TRACING
@@ -575,31 +538,6 @@
}
}

SNMALLOC_FAST_PATH size_t alloc_size(const void* p_raw)
{
const PagemapEntry& entry =
Config::Backend::get_metaentry(address_cast(p_raw));

if (SNMALLOC_UNLIKELY(
!SecondaryAllocator::pass_through && !entry.is_owned() &&
p_raw != nullptr))
return SecondaryAllocator::alloc_size(p_raw);
// TODO What's the domestication policy here? At the moment we just
// probe the pagemap with the raw address, without checks. There could
// be implicit domestication through the `Config::Pagemap` or
// we could just leave well enough alone.

// Note that alloc_size should return 0 for nullptr.
// Other than nullptr, we know the system will be initialised as it must
// be called with something we have already allocated.
//
// To handle this case we require the uninitialised pagemap contain an
// entry for the first chunk of memory, that states it represents a
// large object, so we can pull the check for null off the fast path.

return sizeclass_full_to_size(entry.get_sizeclass());
}

/**
* Accessor, returns the local cache. If embedding code is allocating the
* core allocator for use by this local allocator then it needs to access
6 changes: 3 additions & 3 deletions src/test/func/memory/memory.cc
@@ -70,7 +70,7 @@ void test_limited(rlim64_t as_limit, size_t& count)
std::cout << "trying to alloc " << upper_bound / KiB << " KiB" << std::endl;
# endif
std::cout << "allocator initialised" << std::endl;
auto chunk = snmalloc::alloc.alloc(upper_bound);
auto chunk = snmalloc::alloc(upper_bound);
snmalloc::dealloc(chunk);
std::cout << "success" << std::endl;
std::exit(0);
@@ -221,14 +221,14 @@ void test_double_alloc()
while (!set1.empty())
{
auto it = set1.begin();
a2->dealloc(*it, 20);
a2->dealloc(*it);
set1.erase(it);
}

while (!set2.empty())
{
auto it = set2.begin();
a1->dealloc(*it, 20);
a1->dealloc(*it);
set2.erase(it);
}
}