StrictProvenance support in Backend
Wrap the FrontendSlabMetadata with a struct that holds the Arena-bounded
authority for Chunks that the Backend ships out to the Frontend or, on
non-StrictProvenance architectures, encapsulates the sleight of hand that
turns Chunk-bounded CapPtrs into Arena-bounded ones.
nwf-msr committed May 31, 2022
1 parent a503dd0 commit e87a1b3
Showing 4 changed files with 83 additions and 8 deletions.
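For orientation, a minimal standalone sketch of the pattern this commit introduces, using simplified stand-ins for snmalloc's capptr::Arena / capptr::Alloc wrappers and the two metadata variants (illustrative only, not the actual snmalloc API):

#include <cassert>

// Simplified stand-ins: on a StrictProvenance architecture such as CHERI,
// an Arena-bounded pointer carries strictly more authority than a
// Chunk/Alloc-bounded one and cannot be re-derived from it.
struct ArenaPtr { void* p; };
struct AllocPtr { void* p; };

// Strict variant: the wide pointer must be stashed at allocation time,
// since there is no architectural way to widen the narrow one later.
struct StrictMetaSketch
{
  ArenaPtr arena{};
  void arena_set(ArenaPtr a) { arena = a; }
  ArenaPtr arena_get(AllocPtr c)
  {
    assert(arena.p == c.p); // caller must hand back the matching chunk
    return arena;
  }
};

// Lax variant: integer pointers carry no bounds, so "amplification" is
// purely a change of static type.
struct LaxMetaSketch
{
  void arena_set(ArenaPtr) {} // nothing to stash
  ArenaPtr arena_get(AllocPtr c) { return ArenaPtr{c.p}; }
};

Either way, the backend calls arena_set when it ships a chunk out and arena_get when the chunk comes back, so the surrounding code is oblivious to which variant is in play.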
12 changes: 8 additions & 4 deletions src/snmalloc/backend/backend.h
@@ -100,6 +100,7 @@ namespace snmalloc
return {nullptr, nullptr};
}

+ meta->arena_set(p);
typename Pagemap::Entry t(meta, ras);
Pagemap::set_metaentry(address_cast(p), size, t);

@@ -139,13 +140,16 @@
Pagemap::get_metaentry(address_cast(alloc)).get_slab_metadata());
Pagemap::set_metaentry(address_cast(alloc), size, t);

+ /*
+  * On CHERI, the passed alloc has had its bounds narrowed to just the
+  * Chunk, and so we retrieve the Arena-bounded cap for use in the
+  * remainder of the backend.
+  */
+ capptr::Arena<void> arena = slab_metadata.arena_get(alloc);

local_state.get_meta_range().dealloc_range(
  capptr::Chunk<void>(&slab_metadata), sizeof(SlabMetadata));

- // On non-CHERI platforms, we don't need to re-derive to get a pointer to
- // the chunk. On CHERI platforms this will need to be stored in the
- // SlabMetadata or similar.
- capptr::Arena<void> arena{alloc.unsafe_ptr()};
local_state.get_object_range()->dealloc_range(arena, size);
}

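A subtlety in the dealloc path above: the Arena-bounded capability is stashed inside the SlabMetadata, so it must be read out before the metadata itself is handed back to the meta range. A sketch of that ordering, reusing the stand-in types from the sketch above (the helper names here are hypothetical, not snmalloc's API):

#include <cstddef>

// Hypothetical stubs for the meta range and object range.
void return_metadata_to_meta_range(void* meta);
void return_memory_to_object_range(ArenaPtr arena, std::size_t size);

void dealloc_chunk_sketch(
  StrictMetaSketch& slab_metadata, AllocPtr alloc, std::size_t size)
{
  // Extract the arena capability *before* freeing the metadata that
  // holds it; after the next line its storage may be reused.
  ArenaPtr arena = slab_metadata.arena_get(alloc);
  return_metadata_to_meta_range(&slab_metadata);
  return_memory_to_object_range(arena, size);
}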
4 changes: 2 additions & 2 deletions src/snmalloc/backend/meta_protected_range.h
@@ -101,7 +101,7 @@ namespace snmalloc
LocalCacheSizeBits - SubRangeRatioBits,
bits::BITS - 1,
Pagemap>,
- ProvenanceCaptureRange<Pagemap>,
+ ProvenanceCaptureRange<Pagemap, backend_strict_provenance>,
SmallBuddyRange<>>;

ObjectRange object_range;
@@ -126,7 +126,7 @@
// would require committing memory inside the main global lock.
using GlobalMetaRange = Pipe<
CentralMetaRange,
- ProvenanceCaptureRange<Pagemap>,
+ ProvenanceCaptureRange<Pagemap, backend_strict_provenance>,
SmallBuddyRange<>,
GlobalRange<>>;
};
2 changes: 1 addition & 1 deletion src/snmalloc/backend/standard_range.h
@@ -58,7 +58,7 @@ namespace snmalloc
private:
using ObjectRange = Pipe<
LargeObjectRange,
- ProvenanceCaptureRange<Pagemap>,
+ ProvenanceCaptureRange<Pagemap, backend_strict_provenance>,
SmallBuddyRange<>>;

ObjectRange object_range;
73 changes: 72 additions & 1 deletion src/snmalloc/backend_helpers/defaultpagemapentry.h
@@ -63,6 +63,77 @@ namespace snmalloc
SNMALLOC_FAST_PATH DefaultPagemapEntryT() = default;
};

- using DefaultPagemapEntry = DefaultPagemapEntryT<FrontendSlabMetadata>;
/**
* On CHERI, we must retain, internal to the allocator, the authority over
* entire backing arenas, as there is no architectural mechanism to splice
* together two capabilities. Additionally, these capabilities will retain
* the VMAP software permission, conveying our authority to manipulate the
* address space mappings for said arenas.
*
* We stash these pointers inside the SlabMetadata structures for parts of
* the address space for which SlabMetadata exists. (In other parts of the
* system, we will stash them directly in the pagemap.) This is why this
* class inherits from FrontendSlabMetadata.
*/

class StrictProvenanceBackendSlabMetadata : public FrontendSlabMetadata
{
template<
SNMALLOC_CONCEPT(ConceptPAL) A1,
typename A2,
typename A3,
typename A4>
friend class BackendAllocator;

capptr::Arena<void> arena;

/* Set the arena pointer */
void arena_set(capptr::Arena<void> a)
{
arena = a;
}

/*
 * Retrieve the stashed pointer for a chunk; the caller must ensure that
 * this is the correct arena for the indicated chunk. The chunk argument
 * is unused except in debug builds, as there is no architectural
 * amplification.
 */
capptr::Arena<void> arena_get(capptr::Alloc<void> c)
{
SNMALLOC_ASSERT(address_cast(arena) == address_cast(c));
UNUSED(c);
return arena;
}
};

struct LaxBackendSlabMetadata : public FrontendSlabMetadata
{
/* On non-StrictProvenance architectures, there's nothing to do */
void arena_set(capptr::Arena<void>) {}

/* Just a type sleight of hand, "amplifying" the non-existent bounds */
capptr::Arena<void> arena_get(capptr::Alloc<void> c)
{
return capptr::Arena<void>(c.unsafe_ptr());
}
};

/*
* The backend's leading-order response to StrictProvenance is entirely
* within its data structures and not actually anything to do with the
* architecture. Rather than test aal_supports<StrictProvenance> or
* defined(__CHERI_PURE_CAPABILITY__) or such therein, using this
* backend_strict_provenance flag makes it easy to test a lot of machinery
* on non-StrictProvenance architectures.
*
* XXX This almost surely belongs somewhere else; where?
*/
static constexpr bool backend_strict_provenance =
aal_supports<StrictProvenance>;

using DefaultPagemapEntry = DefaultPagemapEntryT<std::conditional_t<
backend_strict_provenance,
StrictProvenanceBackendSlabMetadata,
LaxBackendSlabMetadata>>;

} // namespace snmalloc
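The final alias is ordinary std::conditional_t dispatch; a freestanding illustration with invented names (not snmalloc's types):

#include <type_traits>

struct StrictVariant {}; // would stash the arena capability
struct LaxVariant {};    // stateless; amplification is just a cast

// Keying the selection on a constexpr flag rather than directly on the
// architecture means the strict machinery can be compiled and tested on
// non-StrictProvenance platforms simply by flipping the flag.
constexpr bool strict_flag = true; // plays the role of backend_strict_provenance

using ChosenMeta = std::conditional_t<strict_flag, StrictVariant, LaxVariant>;
static_assert(std::is_same_v<ChosenMeta, StrictVariant>);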
