StrictProvenance backend support, take 2022.05 #537

Merged: 14 commits, Jun 9, 2022
137 changes: 88 additions & 49 deletions docs/StrictProvenance.md

Large diffs are not rendered by default.

14 changes: 13 additions & 1 deletion src/snmalloc/aal/aal.h
@@ -212,7 +212,8 @@ namespace snmalloc
"capptr_bound must preserve non-spatial CapPtr dimensions");

UNUSED(size);
return CapPtr<T, BOut>(a.template as_static<T>().unsafe_ptr());
return CapPtr<T, BOut>::unsafe_from(
a.template as_static<T>().unsafe_ptr());
}
};
} // namespace snmalloc
@@ -245,6 +246,17 @@ namespace snmalloc

template<AalFeatures F, SNMALLOC_CONCEPT(ConceptAAL) AAL = Aal>
constexpr static bool aal_supports = (AAL::aal_features & F) == F;

/*
* The backend's leading-order response to StrictProvenance is entirely
* within its data structures and not actually anything to do with the
* architecture. Rather than testing aal_supports<StrictProvenance> or
* defined(__CHERI_PURE_CAPABILITY__) there, the backend tests this
* backend_strict_provenance flag, which also makes it easy to exercise much
* of that machinery on non-StrictProvenance architectures.
*/
static constexpr bool backend_strict_provenance =
aal_supports<StrictProvenance>;
} // namespace snmalloc

#ifdef __POINTER_WIDTH__
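The new backend_strict_provenance flag gives the backend a single compile-time switch for its StrictProvenance data-structure changes. Below is a minimal standalone sketch, not snmalloc's actual code, of how one constexpr flag can drive both type selection and control flow; the names LaxMeta, StrictMeta, and on_alloc are hypothetical stand-ins. Flipping the one flag exercises the strict code paths on any architecture, which is the testing convenience the aal.h comment describes.

```cpp
#include <type_traits>

// Hypothetical stand-ins; illustration only, not snmalloc's types.
struct LaxMeta
{
  // no stashed arena pointer
};

struct StrictMeta
{
  void* arena = nullptr; // stashes the wide arena pointer
};

// One flag, standing in for backend_strict_provenance, drives both the type
// selection and the control flow, so the strict machinery can be compiled
// and tested on any architecture by flipping it here.
constexpr bool backend_strict_provenance = false;

using Meta =
  std::conditional_t<backend_strict_provenance, StrictMeta, LaxMeta>;

template<typename M>
void on_alloc(M& m, void* arena)
{
  if constexpr (backend_strict_provenance)
  {
    m.arena = arena; // discarded (not even instantiated) in the lax build
  }
  else
  {
    (void)m;
    (void)arena;
  }
}

int main()
{
  Meta m{};
  on_alloc(m, nullptr);
}
```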
2 changes: 1 addition & 1 deletion src/snmalloc/aal/aal_cheri.h
@@ -86,7 +86,7 @@ namespace snmalloc
}

void* pb = __builtin_cheri_bounds_set_exact(a.unsafe_ptr(), size);
return CapPtr<T, BOut>(static_cast<T*>(pb));
return CapPtr<T, BOut>::unsafe_from(static_cast<T*>(pb));
}
};
} // namespace snmalloc
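A recurring change in this PR replaces implicit CapPtr<T, B>(p) construction with the named factory CapPtr<T, B>::unsafe_from(p). The following toy Ptr class, a hypothetical miniature rather than snmalloc's CapPtr, shows the general pattern: the raw-pointer conversion becomes a private constructor reachable only through an explicitly named, greppable entry point.

```cpp
#include <cassert>

// Hypothetical miniature of the CapPtr pattern; illustration only.
template<typename T>
class Ptr
{
  T* p;
  explicit Ptr(T* p_) : p(p_) {} // raw construction is private...

public:
  // ...so every unchecked conversion from a raw pointer is spelled out
  // and easy to audit or search for.
  static Ptr unsafe_from(T* p_)
  {
    return Ptr(p_);
  }

  T* unsafe_ptr() const
  {
    return p;
  }
};

int main()
{
  int x = 42;
  auto q = Ptr<int>::unsafe_from(&x); // explicit, auditable escape hatch
  assert(*q.unsafe_ptr() == 42);
}
```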
18 changes: 12 additions & 6 deletions src/snmalloc/aal/address.h
@@ -34,7 +34,8 @@ namespace snmalloc
inline CapPtr<void, bounds>
pointer_offset(CapPtr<T, bounds> base, size_t diff)
{
return CapPtr<void, bounds>(pointer_offset(base.unsafe_ptr(), diff));
return CapPtr<void, bounds>::unsafe_from(
pointer_offset(base.unsafe_ptr(), diff));
}

/**
@@ -51,7 +52,8 @@ namespace snmalloc
inline CapPtr<void, bounds>
pointer_offset_signed(CapPtr<T, bounds> base, ptrdiff_t diff)
{
return CapPtr<void, bounds>(pointer_offset_signed(base.unsafe_ptr(), diff));
return CapPtr<void, bounds>::unsafe_from(
pointer_offset_signed(base.unsafe_ptr(), diff));
}

/**
@@ -137,7 +139,8 @@ namespace snmalloc
SNMALLOC_CONCEPT(capptr::ConceptBound) bounds>
inline CapPtr<T, bounds> pointer_align_down(CapPtr<void, bounds> p)
{
return CapPtr<T, bounds>(pointer_align_down<alignment, T>(p.unsafe_ptr()));
return CapPtr<T, bounds>::unsafe_from(
pointer_align_down<alignment, T>(p.unsafe_ptr()));
}

template<size_t alignment>
@@ -174,7 +177,8 @@ namespace snmalloc
SNMALLOC_CONCEPT(capptr::ConceptBound) bounds>
inline CapPtr<T, bounds> pointer_align_up(CapPtr<void, bounds> p)
{
return CapPtr<T, bounds>(pointer_align_up<alignment, T>(p.unsafe_ptr()));
return CapPtr<T, bounds>::unsafe_from(
pointer_align_up<alignment, T>(p.unsafe_ptr()));
}

template<size_t alignment>
@@ -204,7 +208,8 @@ namespace snmalloc
inline CapPtr<T, bounds>
pointer_align_down(CapPtr<void, bounds> p, size_t alignment)
{
return CapPtr<T, bounds>(pointer_align_down<T>(p.unsafe_ptr(), alignment));
return CapPtr<T, bounds>::unsafe_from(
pointer_align_down<T>(p.unsafe_ptr(), alignment));
}

/**
@@ -228,7 +233,8 @@ namespace snmalloc
inline CapPtr<T, bounds>
pointer_align_up(CapPtr<void, bounds> p, size_t alignment)
{
return CapPtr<T, bounds>(pointer_align_up<T>(p.unsafe_ptr(), alignment));
return CapPtr<T, bounds>::unsafe_from(
pointer_align_up<T>(p.unsafe_ptr(), alignment));
}

/**
31 changes: 19 additions & 12 deletions src/snmalloc/backend/backend.h
@@ -22,6 +22,10 @@ namespace snmalloc
using Pal = PAL;
using SlabMetadata = typename PagemapEntry::SlabMetadata;

#ifdef __cpp_concepts
static_assert(IsSlabMeta_Arena<SlabMetadata>);
#endif

public:
/**
* Provide a block of meta-data with size and align.
@@ -35,10 +39,10 @@
* does not avail itself of this degree of freedom.
*/
template<typename T>
static capptr::Chunk<void>
static capptr::Arena<void>
alloc_meta_data(LocalState* local_state, size_t size)
{
capptr::Chunk<void> p;
capptr::Arena<void> p;
if (local_state != nullptr)
{
p = local_state->get_meta_range().alloc_range_with_leftover(size);
@@ -84,7 +88,7 @@ namespace snmalloc
return {nullptr, nullptr};
}

auto p = local_state.object_range.alloc_range(size);
capptr::Arena<void> p = local_state.get_object_range()->alloc_range(size);

#ifdef SNMALLOC_TRACING
message<1024>("Alloc chunk: {} ({})", p.unsafe_ptr(), size);
@@ -97,14 +101,14 @@
#ifdef SNMALLOC_TRACING
message<1024>("Out of memory");
#endif
return {p, nullptr};
return {nullptr, nullptr};
}

meta->arena_set(p);
typename Pagemap::Entry t(meta, ras);
Pagemap::set_metaentry(address_cast(p), size, t);

p = Aal::capptr_bound<void, capptr::bounds::Chunk>(p, size);
return {p, meta};
return {Aal::capptr_bound<void, capptr::bounds::Chunk>(p, size), meta};
}

/**
@@ -140,14 +144,17 @@ namespace snmalloc
Pagemap::get_metaentry(address_cast(alloc)).get_slab_metadata());
Pagemap::set_metaentry(address_cast(alloc), size, t);

/*
* On CHERI, the passed alloc has had its bounds narrowed to just the
* Chunk, and so we retrieve the Arena-bounded cap for use in the
* remainder of the backend.
*/
capptr::Arena<void> arena = slab_metadata.arena_get(alloc);

local_state.get_meta_range().dealloc_range(
capptr::Chunk<void>(&slab_metadata), sizeof(SlabMetadata));
capptr::Arena<void>::unsafe_from(&slab_metadata), sizeof(SlabMetadata));

// On non-CHERI platforms, we don't need to re-derive to get a pointer to
// the chunk. On CHERI platforms this will need to be stored in the
// SlabMetadata or similar.
capptr::Chunk<void> chunk{alloc.unsafe_ptr()};
local_state.object_range.dealloc_range(chunk, size);
local_state.get_object_range()->dealloc_range(arena, size);
}

template<bool potentially_out_of_range = false>
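The alloc/dealloc chunk paths above now stash the wide, Arena-bounded pointer in the slab metadata with arena_set before narrowing to Chunk bounds, and recover it with arena_get at deallocation instead of re-deriving it from the narrowed pointer. A simplified stash-and-recover sketch, with plain pointers and malloc/free standing in for the capability types and ranges (names are illustrative, not the real backend):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdlib>

// Illustration only: plain pointers stand in for capptr::Arena / capptr::Chunk.
struct Meta
{
  void* arena = nullptr;

  void arena_set(void* a)
  {
    arena = a;
  }

  void* arena_get(void* chunk)
  {
    assert(arena == chunk); // same address; arena just carries wider authority
    return arena;
  }
};

struct Backend
{
  static void* alloc_chunk(Meta& meta, std::size_t size)
  {
    void* arena = std::malloc(size); // stands in for object_range alloc_range
    meta.arena_set(arena);           // stash the wide pointer first...
    return arena;                    // ...then hand out the narrowed view
  }

  static void dealloc_chunk(Meta& meta, void* chunk, std::size_t)
  {
    // Recover the wide pointer instead of re-deriving it from `chunk`.
    std::free(meta.arena_get(chunk));
  }
};

int main()
{
  Meta m;
  void* c = Backend::alloc_chunk(m, 64);
  Backend::dealloc_chunk(m, c, 64);
}
```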
8 changes: 4 additions & 4 deletions src/snmalloc/backend/fixedglobalconfig.h
@@ -78,9 +78,9 @@ namespace snmalloc

// Push memory into the global range.
range_to_pow_2_blocks<MIN_CHUNK_BITS>(
capptr::Chunk<void>(heap_base),
capptr::Arena<void>::unsafe_from(heap_base),
heap_length,
[&](capptr::Chunk<void> p, size_t sz, bool) {
[&](capptr::Arena<void> p, size_t sz, bool) {
typename LocalState::GlobalR g;
g.dealloc_range(p, sz);
});
@@ -108,8 +108,8 @@ namespace snmalloc

return CapPtr<
T,
typename B::template with_wildness<capptr::dimension::Wildness::Tame>>(
p.unsafe_ptr());
typename B::template with_wildness<capptr::dimension::Wildness::Tame>>::
unsafe_from(p.unsafe_ptr());
}
};
}
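range_to_pow_2_blocks<MIN_CHUNK_BITS> carves the fixed heap into naturally aligned power-of-two blocks before handing them to the global range. A rough standalone sketch of one way such a carving can work, assuming the base and length are themselves multiples of the minimum block size (hypothetical helper, not snmalloc's implementation):

```cpp
#include <bit>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustration only: split [base, base + length) into power-of-two blocks,
// each naturally aligned to its own size and at least (1 << MinBits) bytes.
// Assumes base and length are multiples of (1 << MinBits).
template<std::size_t MinBits, typename F>
void range_to_pow2_blocks(std::uintptr_t base, std::size_t length, F f)
{
  std::uintptr_t cur = base;
  std::uintptr_t end = base + length;
  while (cur < end)
  {
    // Largest block that is both aligned at `cur` and fits before `end`.
    auto align_bits = static_cast<std::size_t>(std::countr_zero(cur));
    auto fit_bits = static_cast<std::size_t>(std::bit_width(end - cur)) - 1;
    std::size_t bits = align_bits < fit_bits ? align_bits : fit_bits;
    assert(bits >= MinBits);
    std::size_t size = std::size_t{1} << bits;
    f(cur, size);
    cur += size;
  }
}

int main()
{
  // A 0x5000-byte region at 0x3000 splits into a 0x1000 block and a 0x4000
  // block, each aligned to its own size.
  range_to_pow2_blocks<12>(
    0x3000, 0x5000, [](std::uintptr_t p, std::size_t sz) {
      std::printf("block at %#zx, size %#zx\n", static_cast<std::size_t>(p), sz);
    });
}
```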
11 changes: 8 additions & 3 deletions src/snmalloc/backend/meta_protected_range.h
@@ -103,13 +103,18 @@ namespace snmalloc
Pagemap>,
SmallBuddyRange>;

public:
using Stats = StatsCombiner<CentralObjectRange, CentralMetaRange>;

ObjectRange object_range;

MetaRange meta_range;

public:
using Stats = StatsCombiner<CentralObjectRange, CentralMetaRange>;

ObjectRange* get_object_range()
{
return &object_range;
}

MetaRange& get_meta_range()
{
return meta_range;
18 changes: 13 additions & 5 deletions src/snmalloc/backend/standard_range.h
@@ -22,7 +22,7 @@ namespace snmalloc
template<
typename PAL,
typename Pagemap,
typename Base = EmptyRange,
typename Base = EmptyRange<>,
size_t MinSizeBits = MinBaseSizeBits<PAL>()>
struct StandardLocalState : BaseLocalStateConstants
{
@@ -44,23 +44,31 @@ namespace snmalloc
static constexpr size_t page_size_bits =
bits::next_pow2_bits_const(PAL::page_size);

public:
// Source for object allocations and metadata
// Use buddy allocators to cache locally.
using ObjectRange = Pipe<
using LargeObjectRange = Pipe<
Stats,
LargeBuddyRange<
LocalCacheSizeBits,
LocalCacheSizeBits,
Pagemap,
page_size_bits>,
SmallBuddyRange>;
page_size_bits>>;

private:
using ObjectRange = Pipe<LargeObjectRange, SmallBuddyRange>;

ObjectRange object_range;

public:
// Expose a global range for the initial allocation of meta-data.
using GlobalMetaRange = Pipe<ObjectRange, GlobalRange>;

// Where we get user allocations from.
ObjectRange object_range;
LargeObjectRange* get_object_range()
{
return object_range.template ancestor<LargeObjectRange>();
}

// Where we get meta-data allocations from.
ObjectRange& get_meta_range()
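ObjectRange is now the pipe LargeObjectRange followed by SmallBuddyRange, and get_object_range() reaches back to the large-object stage via ancestor<LargeObjectRange>(). A toy sketch of one way a stage chain built by inheritance could expose an ancestor stage (hypothetical types; the real Pipe and ancestor live in snmalloc's range machinery):

```cpp
#include <cstddef>
#include <cstdio>

// Illustration only: each stage inherits from its parent stage, so a chain
// like Pipe<LargeObjectRange, SmallBuddyRange> is "small refining large".
struct LargeStage
{
  void* alloc_range(std::size_t size)
  {
    std::printf("large-object allocation of %zu bytes\n", size);
    return nullptr; // a real range would return memory here
  }
};

struct SmallStage : LargeStage
{
  // would carve small objects out of what LargeStage provides
};

struct LocalState
{
  SmallStage object_range;

  // One way ancestor<T>() could behave: an upcast to the requested stage,
  // letting chunk-sized allocations bypass the small-object refinement.
  LargeStage* get_object_range()
  {
    return static_cast<LargeStage*>(&object_range);
  }
};

int main()
{
  LocalState s;
  s.get_object_range()->alloc_range(std::size_t{1} << 20);
}
```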
89 changes: 89 additions & 0 deletions src/snmalloc/backend_helpers/cheri_slabmetadata_mixin.h
@@ -0,0 +1,89 @@
#pragma once
#include "../pal/pal.h"

namespace snmalloc
{
/**
* On CHERI, we must retain, internal to the allocator, authority over the
* entire backing arenas, as there is no architectural mechanism to splice
* together two capabilities. Additionally, these capabilities will retain
* the VMAP software permission, conveying our authority to manipulate the
* address space mappings for said arenas.
*
* We stash these pointers inside the SlabMetadata structures for parts of
* the address space for which SlabMetadata exists. (In other parts of the
* system, we will stash them directly in the pagemap.) This requires that
* we inherit from the FrontendSlabMetadata.
*/
template<typename SlabMetadata>
class StrictProvenanceSlabMetadataMixin : public SlabMetadata
{
template<
SNMALLOC_CONCEPT(ConceptPAL) A1,
typename A2,
typename A3,
typename A4>
friend class BackendAllocator;

capptr::Arena<void> arena;

/* Set the arena pointer */
void arena_set(capptr::Arena<void> a)
{
arena = a;
}

/*
* Retrieve the stashed pointer for a chunk; the caller must ensure that
* this is the correct arena for the indicated chunk. The latter is unused
* except in debug builds, as there is no architectural amplification.
*/
capptr::Arena<void> arena_get(capptr::Alloc<void> c)
{
SNMALLOC_ASSERT(address_cast(arena) == address_cast(c));
UNUSED(c);
return arena;
}
};

/**
* A dummy implementation of StrictProvenanceSlabMetadataMixin that has no
* computational content, for use on non-StrictProvenance architectures.
*/
template<typename SlabMetadata>
struct LaxProvenanceSlabMetadataMixin : public SlabMetadata
{
/* On non-StrictProvenance architectures, there's nothing to do */
void arena_set(capptr::Arena<void>) {}

/* Just a type sleight of hand, "amplifying" the non-existent bounds */
capptr::Arena<void> arena_get(capptr::Alloc<void> c)
{
return capptr::Arena<void>::unsafe_from(c.unsafe_ptr());
}
};

#ifdef __cpp_concepts
/**
* Rather than having the backend test backend_strict_provenance in several
* places and doing sleights of hand with the type system, we encapsulate
* the amplification.
*/
template<typename T>
concept IsSlabMeta_Arena = requires(T* t, capptr::Arena<void> p)
{
{
t->arena_set(p)
}
->ConceptSame<void>;
}
&&requires(T* t, capptr::Alloc<void> p)
{
{
t->arena_get(p)
}
->ConceptSame<capptr::Arena<void>>;
};
#endif

} // namespace snmalloc
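The IsSlabMeta_Arena concept only asks that the metadata type expose arena_set and arena_get with the expected signatures, so both mixins above model it and the backend can static_assert against it. A reduced sketch of the same shape, using std::same_as in place of snmalloc's ConceptSame and simplified stand-in pointer types:

```cpp
#include <concepts>

// Stand-ins for capptr::Arena<void> / capptr::Alloc<void>; illustration only.
struct ArenaPtr
{
  void* p;
};

struct AllocPtr
{
  void* p;
};

template<typename T>
concept HasArenaStash = requires(T* t, ArenaPtr a, AllocPtr c)
{
  { t->arena_set(a) } -> std::same_as<void>;
  { t->arena_get(c) } -> std::same_as<ArenaPtr>;
};

// A lax variant with no state still models the concept, so the backend can
// use the same code path on every architecture.
struct LaxMeta
{
  void arena_set(ArenaPtr) {}
  ArenaPtr arena_get(AllocPtr c) { return ArenaPtr{c.p}; }
};

struct StrictMeta
{
  ArenaPtr arena{};
  void arena_set(ArenaPtr a) { arena = a; }
  ArenaPtr arena_get(AllocPtr) { return arena; }
};

static_assert(HasArenaStash<LaxMeta>);
static_assert(HasArenaStash<StrictMeta>);

int main() {}
```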
9 changes: 7 additions & 2 deletions src/snmalloc/backend_helpers/commitrange.h
@@ -18,9 +18,14 @@ namespace snmalloc

static constexpr bool ConcurrencySafe = ParentRange::ConcurrencySafe;

using ChunkBounds = typename ParentRange::ChunkBounds;
static_assert(
ChunkBounds::address_space_control ==
capptr::dimension::AddressSpaceControl::Full);

constexpr Type() = default;

capptr::Chunk<void> alloc_range(size_t size)
CapPtr<void, ChunkBounds> alloc_range(size_t size)
{
SNMALLOC_ASSERT_MSG(
(size % PAL::page_size) == 0,
@@ -33,7 +38,7 @@ namespace snmalloc
return range;
}

void dealloc_range(capptr::Chunk<void> base, size_t size)
void dealloc_range(CapPtr<void, ChunkBounds> base, size_t size)
{
SNMALLOC_ASSERT_MSG(
(size % PAL::page_size) == 0,
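CommitRange now takes ChunkBounds from its parent range and statically asserts that the parent hands out pointers with full address-space control, which commit and decommit require. A small sketch of the "re-export the parent's bound type and assert a property of it" pattern, with hypothetical simplified types:

```cpp
// Illustration only: a range layer that re-exports its parent's bound type
// and checks, at compile time, a property it relies on.
enum class AddressSpaceControl
{
  Full,
  Bounded
};

template<AddressSpaceControl C>
struct Bounds
{
  static constexpr AddressSpaceControl address_space_control = C;
};

template<typename ParentRange>
struct CommitLayer : ParentRange
{
  using ChunkBounds = typename ParentRange::ChunkBounds;

  // Committing and decommitting pages needs authority over the whole
  // mapping, so refuse to stack on a parent that only hands out pointers
  // with reduced address-space control.
  static_assert(
    ChunkBounds::address_space_control == AddressSpaceControl::Full,
    "CommitLayer requires full address-space control from its parent");
};

struct ParentFull
{
  using ChunkBounds = Bounds<AddressSpaceControl::Full>;
};

int main()
{
  CommitLayer<ParentFull> layer{};
  (void)layer;
}
```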