Increases the release target to a fraction of HugeCache's size if possible.

PiperOrigin-RevId: 704933147
Change-Id: I260945169c40f70744b90c4fb15ae5bfd5666460
q-ge authored and copybara-github committed Dec 11, 2024
1 parent e2d2f58 commit 6fba903
Showing 3 changed files with 64 additions and 74 deletions.
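For orientation: the three triggers named in the new comments (hitting the memory limit, the background release thread, and ReleaseMemoryToSystem()) all funnel into HugeCache::ReleaseCachedPagesByDemand(). A minimal sketch of the user-facing trigger, assuming the standard tcmalloc MallocExtension API; whether the call reaches HugeCache this way depends on demand-based release being enabled:

#include "tcmalloc/malloc_extension.h"

int main() {
  // Best-effort release: with demand-based release enabled, even a request of
  // 0 bytes may now release up to a fraction of HugeCache's size.
  tcmalloc::MallocExtension::ReleaseMemoryToSystem(/*num_bytes=*/0);
}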
34 changes: 18 additions & 16 deletions tcmalloc/huge_cache.cc
@@ -356,27 +356,29 @@ HugeLength HugeCache::GetDesiredReleaseablePages(

 HugeLength HugeCache::ReleaseCachedPagesByDemand(
     HugeLength n, SkipSubreleaseIntervals intervals, bool hit_limit) {
-  // We cannot release more than what exists in the cache. Also, we want to
-  // increase the release target if the cache has been fragmented for a while
-  // (default 5 min).
-  HugeLength release_target = std::min(
-      std::max(n, HLFromPages(cachestats_tracker_.RealizedFragmentation())),
-      size());
-
-  // When demand-based release is enabled, we would no longer unback in
-  // Release(). Hence, we want to release some hugepages even though the target
-  // is zero, and protect the minimum cache size at the same time. This covers
-  // the background release and the best-effort release triggered by
-  // ReleaseMemoryToSystem(0).
-  if (release_target == NHugePages(0)) {
-    release_target =
-        size() > MinCacheLimit() ? size() - MinCacheLimit() : NHugePages(0);
-  }
+  // We get here when one of three things happened: A) we hit the limit, B) a
+  // background release, or C) ReleaseMemoryToSystem().
+  HugeLength release_target = std::min(n, size());
+
+  // For all three reasons, we want to release as much as possible to be
+  // efficient. However, we do not want to release a large number of hugepages
+  // at once because that may impact applications' performance. So we release
+  // a fraction of the cache.
+  if (size() > MinCacheLimit()) {
+    HugeLength increased_release_target =
+        std::min(HugeLength(kFractionToReleaseFromCache * size().raw_num()),
+                 size() - MinCacheLimit());
+    release_target = std::max(release_target, increased_release_target);
+  }
+
+  if (release_target == NHugePages(0)) {
+    return NHugePages(0);
+  }
   if (intervals.SkipSubreleaseEnabled() && !hit_limit) {
     // Updates the target based on the recent demand history. This will reduce
     // the target if the calculated (future) demand is higher than the current
     // one; in other words, we need to reserve some of the free hugepages to
     // meet the future demand. It also makes sure we release the realized
     // fragmentation.
     release_target = GetDesiredReleaseablePages(release_target, intervals);
   }
   HugeLength released = ShrinkCache(size() - release_target);
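To see the new target selection end to end, here is a minimal, self-contained sketch of the logic above. It is an illustration, not the real implementation: sizes are plain size_t hugepage counts instead of HugeLength, the skip-subrelease reduction and ShrinkCache() are omitted, and the 10-hugepage minimum plus the round-up behavior are assumptions inferred from the updated tests below.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>

constexpr double kFractionToReleaseFromCache = 0.2;  // as added in huge_cache.h
constexpr size_t kMinCacheLimit = 10;  // placeholder for MinCacheLimit()

// Mirrors the patched target selection in ReleaseCachedPagesByDemand().
size_t ReleaseTarget(size_t n, size_t cache_size) {
  // We cannot release more than what exists in the cache.
  size_t target = std::min(n, cache_size);
  // Raise the target to a fraction of the cache while protecting the minimum
  // cache size. Rounding the fraction up reproduces the test expectations
  // below (e.g. ceil(136 * 0.2) = 28).
  if (cache_size > kMinCacheLimit) {
    size_t fraction = static_cast<size_t>(
        std::ceil(kFractionToReleaseFromCache * cache_size));
    target = std::max(target, std::min(fraction, cache_size - kMinCacheLimit));
  }
  return target;
}

int main() {
  std::printf("%zu\n", ReleaseTarget(0, 170));   // 34: zero target, raised
  std::printf("%zu\n", ReleaseTarget(10, 136));  // 28: non-zero target, raised
  std::printf("%zu\n", ReleaseTarget(0, 11));    // 1: capped by the limit
}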
10 changes: 9 additions & 1 deletion tcmalloc/huge_cache.h
@@ -273,9 +273,17 @@ class HugeCache {
   MemoryModifyFunction& unback_;
   absl::Duration cache_time_;
 
-  // Interval used for capping demand calculated for demand-based release.
+  // Interval used for capping demand calculated for demand-based release:
+  // making sure that it is not more than the maximum demand recorded in that
+  // period. When the cap applies, we also release the minimum number of free
+  // hugepages that we have consistently held over the past 5 minutes
+  // (realized fragmentation).
   absl::Duration CapDemandInterval() const { return absl::Minutes(5); }
 
+  // The fraction of the cache that we are happy to return at a time. We use
+  // this to efficiently reduce fragmentation.
+  static constexpr double kFractionToReleaseFromCache = 0.2;
+
   using StatsTrackerType = SubreleaseStatsTracker<600>;
   StatsTrackerType::SubreleaseStats GetSubreleaseStats() const {
     StatsTrackerType::SubreleaseStats stats;
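A note on the new constant: with a zero release target, each best-effort call releases at most ceil(0.2 * size()) hugepages (possibly less after the demand-history check), so repeated rounds trim the cache geometrically, to roughly size * 0.8^k after k rounds and never below MinCacheLimit(), instead of dropping it all at once. The rewritten test below walks the first step of that curve: 170 - ceil(0.2 * 170) = 136 hugepages.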
94 changes: 37 additions & 57 deletions tcmalloc/huge_cache_test.cc
@@ -298,7 +298,7 @@ TEST_P(HugeCacheTest, Growth) {
   // Requests a best-effort demand-based release to shrink the cache.
   if (GetDemandBasedRelease()) {
     cache_.ReleaseCachedPagesByDemand(
-        NHugePages(0),
+        cache_.size(),
         SkipSubreleaseIntervals{.short_interval = absl::Seconds(10),
                                 .long_interval = absl::Seconds(10)},
         /*hit_limit=*/false);
@@ -326,8 +326,9 @@ TEST_P(HugeCacheTest, Growth) {
   for (auto r : items) {
     Release(r);
   }
-  // Requests a best-effort demand-based release. The cache should shrink
-  // to the working set size, avoiding fragmentation.
+  // Requests a demand-based release. The target will increase to
+  // kFractionToReleaseFromCache of the cache, and that is enough to trim
+  // the fragmentation.
   if (GetDemandBasedRelease()) {
     cache_.ReleaseCachedPagesByDemand(
         NHugePages(0),
@@ -576,9 +577,10 @@ HugeCache: Subrelease stats last 10 min: total 256000 pages subreleased (0 pages
)"));
}

// Tests the best effort release -- releasing as much as the past demand allows
// even though the release target is zero.
TEST_P(HugeCacheTest, ReleaseByDemandBestEffort) {
// Tests that we can increase the release target to a fraction
// (kFractionToReleaseFromCache) of HugeCache. This can happen regardless of the
// initial value of the target.
TEST_P(HugeCacheTest, ReleaseByDemandIncreaseTarget) {
if (!GetDemandBasedRelease()) {
GTEST_SKIP();
}
@@ -607,38 +609,47 @@ TEST_P(HugeCacheTest, ReleaseByDemandBestEffort) {
   EXPECT_EQ(cache_.usage(), NHugePages(0));
 
   // The past demand is 80 hps (short 10 hps + long 70 hps), and we can unback
-  // 90 hps.
+  // 34 hps (170 hps * kFractionToReleaseFromCache), more than the release
+  // target (0 hps).
   HugeLength unbacked_1 = cache_.ReleaseCachedPagesByDemand(
       NHugePages(0),
       SkipSubreleaseIntervals{.short_interval = absl::Seconds(120),
                               .long_interval = absl::Seconds(180)},
       /*hit_limit=*/false);
-  EXPECT_EQ(unbacked_1, NHugePages(90));
-  // The past peak demand is 170 hps, and we can unback zero.
+  EXPECT_EQ(unbacked_1, NHugePages(34));
+  // Repeats the test using a non-zero target.
+  EXPECT_EQ(cache_.size(), NHugePages(136));
   HugeLength unbacked_2 = cache_.ReleaseCachedPagesByDemand(
-      NHugePages(0),
-      SkipSubreleaseIntervals{.peak_interval = absl::Seconds(130)},
+      NHugePages(10),
+      SkipSubreleaseIntervals{.short_interval = absl::Seconds(120),
+                              .long_interval = absl::Seconds(180)},
       /*hit_limit=*/false);
-  EXPECT_EQ(unbacked_2, NHugePages(0));
+  EXPECT_EQ(unbacked_2, NHugePages(28));
 
-  // We do not release anything if the size is at the minimum (10 hps).
-  // First, force the cache to be at the minimum.
+  // Tests that we always manage to protect the cache limit (10 hps) while
+  // increasing the target. First, force the cache close to the limit using a
+  // crafted target.
   HugeLength unbacked_3 = cache_.ReleaseCachedPagesByDemand(
-      NHugePages(70), SkipSubreleaseIntervals{}, /*hit_limit=*/true);
-  EXPECT_EQ(unbacked_3, NHugePages(70));
-  EXPECT_EQ(cache_.size(), NHugePages(10));
-  // Then, ask for release again. There has been no demand in the past 10s, so
-  // there would be no reduction if a release target is proposed.
+      NHugePages(97), SkipSubreleaseIntervals{}, /*hit_limit=*/true);
+  EXPECT_EQ(unbacked_3, NHugePages(97));
+  EXPECT_EQ(cache_.size(), NHugePages(11));
+  // Then, ask for release using target zero.
   HugeLength unbacked_4 = cache_.ReleaseCachedPagesByDemand(
-      NHugePages(0),
-      SkipSubreleaseIntervals{.peak_interval = absl::Seconds(10)},
-      /*hit_limit=*/false);
-  EXPECT_EQ(unbacked_4, NHugePages(0));
+      NHugePages(0), SkipSubreleaseIntervals{}, /*hit_limit=*/true);
+  EXPECT_EQ(unbacked_4, NHugePages(1));
   EXPECT_EQ(cache_.size(), NHugePages(10));
-  // Releases the rest.
+  // Now the cache is at the limit. Check that it is protected.
   HugeLength unbacked_5 = cache_.ReleaseCachedPagesByDemand(
-      NHugePages(10), SkipSubreleaseIntervals{}, /*hit_limit=*/false);
-  EXPECT_EQ(unbacked_5, NHugePages(10));
+      NHugePages(0), SkipSubreleaseIntervals{}, /*hit_limit=*/true);
+  EXPECT_EQ(unbacked_5, NHugePages(0));
+
+  // Finally, show that we can release the protected minimum if explicitly
+  // requested. There has been no demand in the past 10s, so we can release
+  // the rest of the cache.
+  HugeLength unbacked_6 = cache_.ReleaseCachedPagesByDemand(
+      NHugePages(100),
+      SkipSubreleaseIntervals{.peak_interval = absl::Seconds(10)},
+      /*hit_limit=*/false);
+  EXPECT_EQ(unbacked_6, NHugePages(10));
 }
 
 // Tests releasing zero pages when the cache size and demand are both zero.
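Assuming MinCacheLimit() is 10 hps, as the comments in this test state, the constants trace the limit-protection arithmetic: after the first two calls the cache holds 170 - 34 - 28 = 108 hps; the crafted 97-hps target leaves 108 - 97 = 11 hps, one above the limit; the following zero-target call can therefore release only min(ceil(0.2 * 11), 11 - 10) = 1 hps; at exactly the limit the size() > MinCacheLimit() guard fails and nothing is released; and only the explicit 100-hps request, with no recent demand to reserve for, releases the final 10 hps.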
@@ -710,37 +721,6 @@ TEST_P(HugeCacheTest, ReleaseByDemandCappedByDemandPeak) {
             NHugePages(15));
 }
 
-// Tests that we can increase the release target if the realized
-// fragmentation is high.
-TEST_P(HugeCacheTest, ReleaseByDemandIncreaseReleaseTarget) {
-  if (!GetDemandBasedRelease()) {
-    GTEST_SKIP();
-  }
-  EXPECT_CALL(mock_unback_, Unback(testing::_, testing::_))
-      .WillRepeatedly(Return(true));
-
-  bool released;
-  // Creates realized fragmentation.
-  HugeRange large = cache_.Get(NHugePages(100), &released);
-  Release(large);
-  Advance(absl::Minutes(5));
-  HugeRange small = cache_.Get(NHugePages(10), &released);
-  Release(small);
-  Advance(absl::Minutes(1));
-  // Releases more than requested due to high fragmentation.
-  EXPECT_EQ(cache_.ReleaseCachedPagesByDemand(
-                NHugePages(10),
-                SkipSubreleaseIntervals{.short_interval = absl::Minutes(1),
-                                        .long_interval = absl::Minutes(1)},
-                /*hit_limit=*/false),
-            NHugePages(90));
-  // Releases the rest.
-  EXPECT_EQ(cache_.ReleaseCachedPagesByDemand(NHugePages(100),
-                                              SkipSubreleaseIntervals{},
-                                              /*hit_limit=*/false),
-            NHugePages(10));
-}
-
 // Tests demand-based skip release. The test is a modified version of the
 // FillerTest.SkipSubrelease test by removing parts designed particularly for
 // subrelease.
