Skip to content

Commit

Permalink
Increase release rate in HugeCache if realized fragmentation allows.
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 691593867
Change-Id: I20a53276e0f9aae9f161c2391a0b9a86b035a320
  • Loading branch information
q-ge authored and copybara-github committed Oct 30, 2024
1 parent 3cf4d18 commit 9b1f4de
Show file tree
Hide file tree
Showing 4 changed files with 74 additions and 2 deletions.
8 changes: 6 additions & 2 deletions tcmalloc/huge_cache.cc
Original file line number Diff line number Diff line change
Expand Up @@ -357,8 +357,12 @@ HugeLength HugeCache::GetDesiredReleaseablePages(

HugeLength HugeCache::ReleaseCachedPagesByDemand(
HugeLength n, SkipSubreleaseIntervals intervals, bool hit_limit) {
// We cannot release more than what exists in the cache.
HugeLength release_target = std::min(n, size());
// We cannot release more than what exists in the cache. Also, we want to
// increase the release target if the cache has been fragmented for a while
// (default 5 min).
HugeLength release_target = std::min(
std::max(n, HLFromPages(cachestats_tracker_.RealizedFragmentation())),
size());

// When demand-based release is enabled, we would no longer unback in
// Release(). Hence, we want to release some hugepages even though the target
Expand Down
31 changes: 31 additions & 0 deletions tcmalloc/huge_cache_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -710,6 +710,37 @@ TEST_P(HugeCacheTest, ReleaseByDemandCappedByDemandPeak) {
NHugePages(15));
}

// Tests that we can increase the release target if the realized
// fragmentation is high.
TEST_P(HugeCacheTest, ReleaseByDemandIncreaseReleaseTarget) {
// The increased-target path only exists under demand-based release.
if (!GetDemandBasedRelease()) {
GTEST_SKIP();
}
// Let every unback attempt succeed so the release amounts are not capped
// by unback failures.
EXPECT_CALL(mock_unback_, Unback(testing::_, testing::_))
.WillRepeatedly(Return(true));

bool released;
// Creates realized fragmentation: cache 100 hugepages, then reuse only 10,
// so at least 90 hugepages stay free-but-backed across the tracking window
// (default 5 min).
HugeRange large = cache_.Get(NHugePages(100), &released);
Release(large);
Advance(absl::Minutes(5));
HugeRange small = cache_.Get(NHugePages(10), &released);
Release(small);
Advance(absl::Minutes(1));
// Releases more than requested (10) due to high realized fragmentation:
// the target is raised to the 90 fragmented hugepages.
EXPECT_EQ(cache_.ReleaseCachedPagesByDemand(
NHugePages(10),
SkipSubreleaseIntervals{.short_interval = absl::Minutes(1),
.long_interval = absl::Minutes(1)},
/*hit_limit=*/false),
NHugePages(90));
// Releases the rest of the cache; no skip intervals this time.
EXPECT_EQ(cache_.ReleaseCachedPagesByDemand(NHugePages(100),
SkipSubreleaseIntervals{},
/*hit_limit=*/false),
NHugePages(10));
}

// Tests demand-based skip release. The test is a modified version of the
// FillerTest.SkipSubrelease test by removing parts designed particularly for
// subrelease.
Expand Down
19 changes: 19 additions & 0 deletions tcmalloc/huge_page_subrelease.h
Original file line number Diff line number Diff line change
Expand Up @@ -435,6 +435,25 @@ class SubreleaseStatsTracker {
return mins;
}

// Returns the realized fragmentation: the smallest min_free_backed_pages
// value recorded in any non-empty epoch within the last summary_interval_
// (default 5 min). Returns zero pages when no non-empty epoch exists in
// that window.
Length RealizedFragmentation() const {
const int64_t epochs_to_scan =
std::min<int64_t>(summary_interval_ / epoch_length_, kEpochs);
Length lowest_free_backed = Length::max();
tracker_.IterBackwards(
[&](size_t offset, int64_t ts, const SubreleaseStatsEntry& e) {
if (e.empty()) {
return;
}
lowest_free_backed =
std::min(lowest_free_backed, e.min_free_backed_pages);
},
epochs_to_scan);
// Length::max() here means every scanned epoch was empty.
if (lowest_free_backed == Length::max()) {
return Length(0);
}
return lowest_free_backed;
}

private:
// We collect subrelease statistics at four "interesting points" within each
// time step: at min/max demand of pages and at min/max use of hugepages. This
Expand Down
18 changes: 18 additions & 0 deletions tcmalloc/huge_page_subrelease_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,24 @@ TEST_F(StatsTrackerTest, ComputeRecentDemandAndCappedToPeak) {
EXPECT_EQ(demand_2, Length(1500));
}

// Tests that we can compute the realized fragmentation correctly.
TEST_F(StatsTrackerTest, ComputeRealizedFragmentation) {
// Three demand points inside the summary window; the second argument is
// the free-backed page count, whose minimum (500) is the realized
// fragmentation.
GenerateDemandPoint(Length(50), Length(500));
Advance(absl::Minutes(2));
GenerateDemandPoint(Length(3000), Length(1000));
Advance(absl::Minutes(1));
GenerateDemandPoint(Length(1500), Length(2000));
Advance(absl::Minutes(2));
Length fragmentation_1 = tracker_.RealizedFragmentation();
EXPECT_EQ(fragmentation_1, Length(500));

// After 30 minutes the earlier epochs fall outside the summary interval
// (default 5 min), so only the fresh point (free backed = 2000) counts.
Advance(absl::Minutes(30));
GenerateDemandPoint(Length(1500), Length(2000));
Advance(absl::Minutes(2));
Length fragmentation_2 = tracker_.RealizedFragmentation();
EXPECT_EQ(fragmentation_2, Length(2000));
}

TEST_F(StatsTrackerTest, TrackCorrectSubreleaseDecisions) {
// First peak (large)
GenerateDemandPoint(Length(1000), Length(1000));
Expand Down

0 comments on commit 9b1f4de

Please sign in to comment.