Skip to content

Commit 68c9aca

Browse files
authored
Merge pull request #23550 from nvartolomei/nv/disk-gc-take-2
rptest: increase threshold for test_full_disk_triggers_gc
2 parents 829205a + cbdc23b commit 68c9aca

File tree

1 file changed

+24
-10
lines changed

1 file changed

+24
-10
lines changed

tests/rptest/tests/full_disk_test.py

+24-10
Original file line numberDiff line numberDiff line change
@@ -253,15 +253,19 @@ class FullDiskReclaimTest(RedpandaTest):
253253
"""
254254
Test that full disk alert triggers eager gc to reclaim space
255255
"""
256-
topics = (TopicSpec(partition_count=10,
256+
partition_count = 10
257+
log_segment_size = 1048576
258+
259+
topics = (TopicSpec(partition_count=partition_count,
257260
retention_bytes=1,
258261
retention_ms=1,
259262
cleanup_policy=TopicSpec.CLEANUP_DELETE), )
260263

261264
def __init__(self, test_ctx):
262265
extra_rp_conf = dict(
263266
log_compaction_interval_ms=24 * 60 * 60 * 1000,
264-
log_segment_size=1048576,
267+
log_segment_size=self.log_segment_size,
268+
log_segment_size_jitter_percent=0,
265269
)
266270
super().__init__(test_context=test_ctx, extra_rp_conf=extra_rp_conf)
267271

@@ -281,18 +285,23 @@ def test_full_disk_triggers_gc(self):
281285
nbytes = lambda mb: mb * 2**20
282286
node = self.redpanda.nodes[0]
283287

288+
produce_size = 3 * self.partition_count * self.log_segment_size
289+
expected_size_after_gc = self.partition_count * self.log_segment_size + nbytes(
290+
1)
291+
assert expected_size_after_gc < produce_size
292+
284293
def observed_data_size(pred):
285294
observed = self.redpanda.data_stat(node)
286295
observed_total = sum(s for path, s in observed
287296
if path.parts[0] == 'kafka')
288297
return pred(observed_total)
289298

290-
# write around 30 megabytes into the topic
291-
produce_total_bytes(self.redpanda, self.topic, nbytes(100))
299+
# write into the topic
300+
produce_total_bytes(self.redpanda, self.topic, produce_size)
292301

293302
# wait until all that data shows up. add some fuzz factor to avoid
294303
# timeouts due to placement skew or other such issues.
295-
wait_until(lambda: observed_data_size(lambda s: s > nbytes(25)),
304+
wait_until(lambda: observed_data_size(lambda s: s >= produce_size),
296305
timeout_sec=30,
297306
backoff_sec=2)
298307

@@ -302,7 +311,7 @@ def observed_data_size(pred):
302311

303312
# wait until all that data shows up. add some fuzz factor to avoid
304313
# timeouts due to placement skew or other such issues.
305-
wait_until(lambda: observed_data_size(lambda s: s > nbytes(25)),
314+
wait_until(lambda: observed_data_size(lambda s: s >= produce_size),
306315
timeout_sec=30,
307316
backoff_sec=2)
308317

@@ -312,10 +321,15 @@ def observed_data_size(pred):
312321
full_disk = FullDiskHelper(self.logger, self.redpanda)
313322
full_disk.trigger_low_space(node=node)
314323

315-
# now wait until the data drops below 1 mb
316-
wait_until(lambda: observed_data_size(lambda s: s < nbytes(1)),
317-
timeout_sec=10,
318-
backoff_sec=2)
324+
# now wait until the data drops
325+
# the expected size is at most one segment for each partition and a
326+
# bit extra for stm snapshots. although we expect the subsystem to
327+
# reclaim all segments there are other internal systems
328+
# (e.g. leadership balancer) which can trigger writes to the partitions.
329+
wait_until(
330+
lambda: observed_data_size(lambda s: s < expected_size_after_gc),
331+
timeout_sec=10,
332+
backoff_sec=2)
319333

320334

321335
class LocalDiskReportTimeTest(RedpandaTest):

0 commit comments

Comments
 (0)