diff --git a/tcmalloc/span.h b/tcmalloc/span.h
index 7d3375a51..110bbd840 100644
--- a/tcmalloc/span.h
+++ b/tcmalloc/span.h
@@ -133,7 +133,7 @@ class Span final : public SpanList::Elem {
   // ensure by some other means that the sampling state can't be changed
   // concurrently.
   // REQUIRES: this is a SAMPLED span.
-  SampledAllocation* sampled_allocation() const;
+  const SampledAllocation& sampled_allocation() const;
 
   // Is it a sampling span?
   // For debug checks. pageheap_lock is not required, but caller needs to ensure
@@ -460,10 +460,10 @@ inline bool Span::BitmapPush(void* ptr, size_t size, uint32_t reciprocal) {
   return true;
 }
 
-inline SampledAllocation* Span::sampled_allocation() const {
+inline const SampledAllocation& Span::sampled_allocation() const {
   TC_ASSERT(sampled_);
   TC_ASSERT(is_large_or_sampled());
-  return large_or_sampled_state_.sampled_allocation;
+  return *large_or_sampled_state_.sampled_allocation;
 }
 
 inline bool Span::sampled() const { return sampled_; }
diff --git a/tcmalloc/tcmalloc.cc b/tcmalloc/tcmalloc.cc
index 60b97b441..fee20a3c7 100644
--- a/tcmalloc/tcmalloc.cc
+++ b/tcmalloc/tcmalloc.cc
@@ -501,7 +501,7 @@ inline size_t GetLargeSize(const void* ptr, const Span& span) {
     if (tc_globals.guardedpage_allocator().PointerIsMine(ptr)) {
      return tc_globals.guardedpage_allocator().GetRequestedSize(ptr);
    }
-    return span.sampled_allocation()->sampled_stack.allocated_size;
+    return span.sampled_allocation().sampled_stack.allocated_size;
  } else {
    return span.bytes_in_span();
  }
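
Note: the diff changes the accessor from returning a SampledAllocation* to returning a const SampledAllocation&, so the nullness check moves into the accessor (via TC_ASSERT) and call sites use '.' instead of '->'. The following is a minimal standalone sketch of that pointer-to-reference accessor pattern; the Widget/Holder names are hypothetical and not part of tcmalloc.

#include <cassert>

// Hypothetical stand-ins for SampledAllocation / Span; not tcmalloc code.
struct Widget {
  int allocated_size = 0;
};

class Holder {
 public:
  explicit Holder(Widget* w) : widget_(w) {}

  // Before: pointer-returning accessor; every caller must consider nullptr.
  Widget* widget_ptr() const { return widget_; }

  // After: reference-returning accessor; validity is asserted once here,
  // and callers dereference with '.' instead of '->'.
  const Widget& widget() const {
    assert(widget_ != nullptr);
    return *widget_;
  }

 private:
  Widget* widget_;  // Set only while the holder is in its "sampled" state.
};

int main() {
  Widget w{42};
  Holder h(&w);
  // Old call-site style: h.widget_ptr()->allocated_size
  // New call-site style:
  return h.widget().allocated_size == 42 ? 0 : 1;
}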