diff --git a/tests/rptest/clients/kafka_cli_tools.py b/tests/rptest/clients/kafka_cli_tools.py
index 8ec3a62547192..e76fa02f95803 100644
--- a/tests/rptest/clients/kafka_cli_tools.py
+++ b/tests/rptest/clients/kafka_cli_tools.py
@@ -320,7 +320,8 @@ def produce(self,
                 record_size: int,
                 acks: int = -1,
                 throughput: int = -1,
-                batch_size: int = 81960):
+                batch_size: int = 81960,
+                linger_ms: int = 0):
         self._redpanda.logger.debug("Producing to topic: %s", topic)
         cmd = [self._script("kafka-producer-perf-test.sh")]
         cmd += ["--topic", topic]
@@ -329,9 +330,11 @@ def produce(self,
         cmd += ["--throughput", str(throughput)]
         cmd += [
             "--producer-props",
-            "acks=%d" % acks, "client.id=ducktape",
+            "acks=%d" % acks,
+            "client.id=ducktape",
             "batch.size=%d" % batch_size,
-            "bootstrap.servers=%s" % self._redpanda.brokers()
+            "bootstrap.servers=%s" % self._redpanda.brokers(),
+            "linger.ms=%d" % linger_ms,
         ]
         if self._command_config:
             cmd += ["--producer.config", self._command_config.name]
diff --git a/tests/rptest/tests/full_disk_test.py b/tests/rptest/tests/full_disk_test.py
index 37d3641e1cd95..79397282a1bfe 100644
--- a/tests/rptest/tests/full_disk_test.py
+++ b/tests/rptest/tests/full_disk_test.py
@@ -368,6 +368,8 @@ def __init__(self, test_ctx):
     def test_target_min_capacity_wanted_time_based(self):
         admin = Admin(self.redpanda)
         default_segment_size = admin.get_cluster_config()["log_segment_size"]
+        storage_reserve_min_segments = admin.get_cluster_config(
+        )["storage_reserve_min_segments"]
 
         # produce roughly 30mb at 0.5mb/sec
         kafka_tools = KafkaCliTools(self.redpanda)
@@ -375,28 +377,37 @@ def test_target_min_capacity_wanted_time_based(self):
                             30 * 1024,
                             1024,
                             throughput=500,
-                            acks=-1)
+                            acks=-1,
+                            linger_ms=50)
 
         node = self.redpanda.nodes[0]
         reported = admin.get_local_storage_usage(
             node)["target_min_capacity_wanted"]
 
-        # params. the size is about 900k larger than what was written,
-        # attributable to per record overheads etc... and determined emperically
-        # by looking at trace log stats.
+        # The size is slightly larger than what was written, attributable to
+        # per record overheads, indices, fallocation, etc... The expected size
+        # is determined empirically by looking at trace log stats.
         size = 32664482
         time = 61
         retention = 3600
         expected = retention * (size / time)
 
-        # factor in the 2 segments worth of space for controller log
-        diff = abs(reported - expected - 2 * default_segment_size)
+        # Factor in the full segments worth of space for controller log.
+        # This mirrors the math in disk_log_impl.cc
+        controller_want_size = storage_reserve_min_segments * default_segment_size
 
-        # there is definitely going to be some fuzz factor needed here and may
-        # need updated, but after many runs 50mb was a good amount of slack.
-        assert diff <= (
-            100 * 2**20
-        ), f"diff {diff} reported {reported} expected {expected} default seg size {default_segment_size}"
+        diff = reported - controller_want_size - expected
+
+        # There is definitely going to be some fuzz factor needed here and may
+        # need updated.
+        diff_threshold = 100 * 2**20
+
+        self.logger.info(
+            f"{diff=} {diff_threshold=} {reported=} {expected=} {controller_want_size=}"
+        )
+        assert abs(
+            diff
+        ) <= diff_threshold, f"abs({diff=}) <= {diff_threshold=} {reported=} {expected=} {controller_want_size=}"
 
 
 class LocalDiskReportTest(RedpandaTest):
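
For reference, a minimal sketch (not part of the patch) of the capacity-wanted arithmetic the reworked assertion encodes. The segment size and reserve count below are assumed defaults used purely for illustration; the test itself reads the real values from the cluster config via the admin API.

    # Hypothetical stand-ins for cluster config values (the test queries these
    # at runtime; they are not hard-coded in the patch).
    default_segment_size = 128 * 2**20        # assumed log_segment_size default
    storage_reserve_min_segments = 2          # assumed reserve default

    # Values taken from the test body above.
    size = 32664482                           # bytes written (empirical)
    time = 61                                 # seconds of produce traffic
    retention = 3600                          # retention window, in seconds

    expected = retention * (size / time)      # time-based capacity wanted
    controller_want_size = storage_reserve_min_segments * default_segment_size
    diff_threshold = 100 * 2**20              # 100 MiB of slack

    # The test asserts:
    #   abs(reported - controller_want_size - expected) <= diff_threshold
    print(expected + controller_want_size, diff_threshold)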