diff --git a/build.gradle b/build.gradle index dc3bf215ec88f..00d52f3e73782 100644 --- a/build.gradle +++ b/build.gradle @@ -41,6 +41,7 @@ plugins { id 'org.scoverage' version '8.0.3' apply false id 'com.gradleup.shadow' version '8.3.9' apply false id 'com.diffplug.spotless' version "7.2.1" + id 'net.ltgt.errorprone' version '4.3.0' apply false } ext { @@ -76,7 +77,7 @@ ext { // until version can be upgraded: https://github.com/spotbugs/spotbugs/issues/3564 project.gradle.startParameter.excludedTaskNames.add("spotbugsMain") project.gradle.startParameter.excludedTaskNames.add("spotbugsTest") - } + } maxTestForks = project.hasProperty('maxParallelForks') ? maxParallelForks.toInteger() : Runtime.runtime.availableProcessors() maxScalacThreads = project.hasProperty('maxScalacThreads') ? maxScalacThreads.toInteger() : @@ -228,6 +229,8 @@ allprojects { delete "${projectDir}/src/generated" delete "${projectDir}/src/generated-test" } + + apply from: rootProject.file('gradle/error-prone.gradle') } def determineCommitId() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java index c38b5859f5f59..8e174efa676bb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java @@ -387,7 +387,7 @@ private boolean isSticky() { if (hasCycles(topicMovementPairs)) { log.error("Stickiness is violated for topic {}" + "\nPartition movements for this topic occurred among the following consumer pairs:" - + "\n{}", topicMovements.getKey(), topicMovements.getValue().toString()); + + "\n{}", topicMovements.getKey(), topicMovements.getValue()); return false; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/RecordMetadata.java b/clients/src/main/java/org/apache/kafka/clients/producer/RecordMetadata.java index 0c974288caae1..5617ccb7158d1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/RecordMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/RecordMetadata.java @@ -119,6 +119,6 @@ public int partition() { @Override public String toString() { - return topicPartition.toString() + "@" + offset; + return topicPartition + "@" + offset; } } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java index 182a8c7484931..66e1b4a0b4446 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java @@ -661,7 +661,7 @@ public MetricName metricInstance(MetricNameTemplate template, Map iter = removable.iterator(); @@ -191,13 +191,13 @@ public void increment(K key) { gauges.remove(keyToRemove); pending.push(new PendingMetricsChange(metricNameToRemove, null)); log.trace("{}: Removing the metric {}, which has a value of 0.", - suiteName, keyToRemove.toString()); + suiteName, keyToRemove); } MetricName metricNameToAdd = metricNameCalculator.apply(key); gauge = new StoredIntGauge(metricNameToAdd); gauges.put(key, gauge); pending.push(new PendingMetricsChange(metricNameToAdd, gauge)); - log.trace("{}: Adding a new metric {}.", suiteName, key.toString()); + log.trace("{}: Adding a new metric {}.", suiteName, key); } // Drop the object monitor and perform any pending metrics additions or removals. 
performPendingMetricsOperations(); @@ -236,17 +236,17 @@ private void performPendingMetricsOperations() { public synchronized void decrement(K key) { if (closed) { log.warn("{}: Attempted to decrement {}, but the gauge suite was closed.", - suiteName, key.toString()); + suiteName, key); return; } StoredIntGauge gauge = gauges.get(key); if (gauge == null) { log.debug("{}: Attempted to decrement {}, but no such metric was registered.", - suiteName, key.toString()); + suiteName, key); } else { int cur = gauge.decrement(); log.trace("{}: Removed a reference to {}. {} reference(s) remaining.", - suiteName, key.toString(), cur); + suiteName, key, cur); if (cur <= 0) { removable.add(key); } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java index fe79bb94e6517..b8d195ea22724 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java @@ -57,7 +57,7 @@ public static String deepToString(Iterator iter) { while (iter.hasNext()) { Object object = iter.next(); bld.append(prefix); - bld.append(object.toString()); + bld.append(object); prefix = ", "; } bld.append("]"); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/types/Schema.java b/clients/src/main/java/org/apache/kafka/common/protocol/types/Schema.java index 325a77fe43ffc..8719e08e458c4 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/types/Schema.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/types/Schema.java @@ -181,7 +181,7 @@ public String toString() { StringBuilder b = new StringBuilder(); b.append('{'); for (int i = 0; i < this.fields.length; i++) { - b.append(this.fields[i].toString()); + b.append(this.fields[i]); if (i < this.fields.length - 1) b.append(','); } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java b/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java index 521f6346ed916..d2ff2aeca2e1d 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java @@ -138,7 +138,7 @@ public String toString() { for (Map.Entry field : fields.entrySet()) { bld.append(prefix); prefix = ", "; - bld.append(field.getKey()).append(" -> ").append(field.getValue().toString()); + bld.append(field.getKey()).append(" -> ").append(field.getValue()); } bld.append(")"); return bld.toString(); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 347f76135866d..e9a34edd975f7 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -892,7 +892,7 @@ private KafkaMetric getMetric(String name, Map tags) throws Exce entry.getKey().name().equals(name) && entry.getKey().tags().equals(tags)) .findFirst(); if (metric.isEmpty()) - throw new Exception(String.format("Could not find metric called %s with tags %s", name, tags.toString())); + throw new Exception(String.format("Could not find metric called %s with tags %s", name, tags)); return metric.get().getValue(); } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java 
b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java index 6a412112c3f8a..693268bd151ac 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java @@ -226,12 +226,12 @@ public void awaitStop() { private void checkHerder(SourceAndTarget sourceAndTarget) { if (!herders.containsKey(sourceAndTarget)) { - throw new IllegalArgumentException("No herder for " + sourceAndTarget.toString()); + throw new IllegalArgumentException("No herder for " + sourceAndTarget); } } private void addHerder(SourceAndTarget sourceAndTarget) { - log.info("creating herder for {}", sourceAndTarget.toString()); + log.info("creating herder for {}", sourceAndTarget); Map workerProps = config.workerConfig(sourceAndTarget); DistributedConfig distributedConfig = new DistributedConfig(workerProps); String encodedSource = encodePath(sourceAndTarget.source()); @@ -289,13 +289,13 @@ private static String encodePath(String rawPath) { private String generateWorkerId(SourceAndTarget sourceAndTarget) { if (config.enableInternalRest()) { - return advertisedUrl.getHost() + ":" + advertisedUrl.getPort() + "/" + sourceAndTarget.toString(); + return advertisedUrl.getHost() + ":" + advertisedUrl.getPort() + "/" + sourceAndTarget; } try { //UUID to make sure it is unique even if multiple workers running on the same host - return InetAddress.getLocalHost().getCanonicalHostName() + "/" + sourceAndTarget.toString() + "/" + UUID.randomUUID(); + return InetAddress.getLocalHost().getCanonicalHostName() + "/" + sourceAndTarget + "/" + UUID.randomUUID(); } catch (UnknownHostException e) { - return sourceAndTarget.toString() + "/" + UUID.randomUUID(); + return sourceAndTarget + "/" + UUID.randomUUID(); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/converters/BooleanConverter.java b/connect/runtime/src/main/java/org/apache/kafka/connect/converters/BooleanConverter.java index 3c030713021d4..609d1953f046e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/converters/BooleanConverter.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/converters/BooleanConverter.java @@ -71,7 +71,7 @@ public void configure(Map configs, boolean isKey) { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema != null && schema.type() != Type.BOOLEAN) - throw new DataException("Invalid schema type for BooleanConverter: " + schema.type().toString()); + throw new DataException("Invalid schema type for BooleanConverter: " + schema.type()); try { return serializer.serialize(topic, (Boolean) value); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/converters/ByteArrayConverter.java b/connect/runtime/src/main/java/org/apache/kafka/connect/converters/ByteArrayConverter.java index ec934ad56cc42..256f9e8f516ff 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/converters/ByteArrayConverter.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/converters/ByteArrayConverter.java @@ -58,7 +58,7 @@ public void configure(Map configs, boolean isKey) { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema != null && schema.type() != Schema.Type.BYTES) - throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString()); + throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type()); if (value != null && 
!(value instanceof byte[]) && !(value instanceof ByteBuffer)) throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass()); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java index a83c515e73adf..7c550679e003f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java @@ -185,7 +185,7 @@ static String prefixFor(String connectorName, Scope scope, Integer taskNumber) { // Append non-task scopes (e.g., worker and offset) if (scope != Scope.TASK) { sb.append("|"); - sb.append(scope.toString()); + sb.append(scope); } sb.append("] "); return sb.toString(); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java index 7c13ef4d785de..1d5e13bad3647 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java @@ -283,7 +283,7 @@ private static Object castValueToType(Schema schema, Object value, Schema.Type t default -> throw new DataException(targetType + " is not supported in the Cast transformation."); }; } catch (NumberFormatException e) { - throw new DataException("Value (" + value.toString() + ") was out of range for requested data type", e); + throw new DataException("Value (" + value + ") was out of range for requested data type", e); } } diff --git a/generator/src/main/java/org/apache/kafka/message/FieldType.java b/generator/src/main/java/org/apache/kafka/message/FieldType.java index 4b204fb4950a5..beaa8ec2abec2 100644 --- a/generator/src/main/java/org/apache/kafka/message/FieldType.java +++ b/generator/src/main/java/org/apache/kafka/message/FieldType.java @@ -379,7 +379,7 @@ public String elementName() { @Override public String toString() { - return "[]" + elementType.toString(); + return "[]" + elementType; } } diff --git a/generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java b/generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java index 16a611c325030..a5a31b3b13d6d 100644 --- a/generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java +++ b/generator/src/main/java/org/apache/kafka/message/JsonConverterGenerator.java @@ -262,7 +262,7 @@ private void generateVariableLengthTargetFromJson(Target target, Versions curVer } else if (target.field().type().isStruct()) { buffer.printf("%s;%n", target.assignmentStatement( String.format("%s%s.read(%s, _version)", - target.field().type().toString(), SUFFIX, target.sourceVariable()))); + target.field().type(), SUFFIX, target.sourceVariable()))); } else { throw new RuntimeException("Unexpected type " + target.field().type()); } @@ -439,7 +439,7 @@ private void generateVariableLengthTargetToJson(Target target, Versions versions } else if (target.field().type().isStruct()) { buffer.printf("%s;%n", target.assignmentStatement( String.format("%sJsonConverter.write(%s, _version, _serializeRecords)", - target.field().type().toString(), target.sourceVariable()))); + target.field().type(), target.sourceVariable()))); } else { throw new RuntimeException("unknown type " + target.field().type()); } diff --git a/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java 
b/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java index 864e989f4c7c8..2e536be468941 100644 --- a/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java +++ b/generator/src/main/java/org/apache/kafka/message/MessageGenerator.java @@ -250,7 +250,7 @@ public static void processDirectories(String packageName, numProcessed++; typeClassGenerators.forEach(generator -> generator.registerMessageType(spec)); } catch (Exception e) { - throw new RuntimeException("Exception while processing " + inputPath.toString(), e); + throw new RuntimeException("Exception while processing " + inputPath, e); } } } diff --git a/gradle/error-prone.gradle b/gradle/error-prone.gradle new file mode 100644 index 0000000000000..1c54e2bb29198 --- /dev/null +++ b/gradle/error-prone.gradle @@ -0,0 +1,37 @@ +import static java.lang.System.getenv + +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +apply plugin: 'net.ltgt.errorprone' + +dependencies { + errorprone('com.google.errorprone:error_prone_core:2.42.0') + errorprone('tech.picnic.error-prone-support:error-prone-contrib:0.25.0') +} + +tasks.withType(JavaCompile).configureEach { + options.errorprone { + disableAllChecks = true // consider removing this to enable all Error Prone checks, following convention over configuration. + error('RedundantStringConversion') + errorproneArgs.add('-XepExcludedPaths:.*/build/generated/.*') + if (!getenv().containsKey('CI') && getenv('IN_PLACE')?.toBoolean()) { + errorproneArgs.addAll( + '-XepPatchLocation:IN_PLACE', + '-XepPatchChecks:RedundantStringConversion' + ) + } + } +} diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java index be9c4f9abc311..d3b238af72561 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java @@ -3699,8 +3699,8 @@ private StreamsGroupMember maybeReconcile( log.info("[GroupId {}][MemberId {}] Member's new assignment state: epoch={}, previousEpoch={}, state={}, " + "assignedTasks={} and tasksPendingRevocation={}.", groupId, updatedMember.memberId(), updatedMember.memberEpoch(), updatedMember.previousMemberEpoch(), updatedMember.state(), - updatedMember.assignedTasks().toString(), - updatedMember.tasksPendingRevocation().toString()); + updatedMember.assignedTasks(), + updatedMember.tasksPendingRevocation()); // Schedule/cancel the rebalance timeout.
if (updatedMember.state() == org.apache.kafka.coordinator.group.streams.MemberState.UNREVOKED_TASKS) { diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index 2ce4b2f420fae..a0684b5d3e79b 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -659,10 +659,10 @@ private void replayRegistrationChange( BrokerRegistration curRegistration = brokerRegistrations.get(brokerId); if (curRegistration == null) { throw new RuntimeException(String.format("Unable to replay %s: no broker " + - "registration found for that id", record.toString())); + "registration found for that id", record)); } else if (curRegistration.epoch() != brokerEpoch) { throw new RuntimeException(String.format("Unable to replay %s: no broker " + - "registration with that epoch found", record.toString())); + "registration with that epoch found", record)); } else { BrokerRegistration nextRegistration = curRegistration.cloneWith( fencingChange, diff --git a/metadata/src/main/java/org/apache/kafka/image/loader/MetadataLoader.java b/metadata/src/main/java/org/apache/kafka/image/loader/MetadataLoader.java index a1513a0c4c04b..2119702704bd8 100644 --- a/metadata/src/main/java/org/apache/kafka/image/loader/MetadataLoader.java +++ b/metadata/src/main/java/org/apache/kafka/image/loader/MetadataLoader.java @@ -331,7 +331,7 @@ private void maybePublishMetadata(MetadataDelta delta, MetadataImage image, Load this.image = image; if (stillNeedToCatchUp( - "maybePublishMetadata(" + manifest.type().toString() + ")", + "maybePublishMetadata(" + manifest.type() + ")", manifest.provenance().lastContainedOffset()) ) { return; diff --git a/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java b/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java index 47e0c74b85065..474a48e5e1f93 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/VersionRange.java @@ -72,9 +72,9 @@ public String toString() { if (min == max) { return String.valueOf(min); } else if (max == Short.MAX_VALUE) { - return String.valueOf(min) + "+"; + return min + "+"; } else { - return String.valueOf(min) + "-" + String.valueOf(max); + return min + "-" + max; } } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/MockRaftClientListener.java b/metadata/src/test/java/org/apache/kafka/controller/MockRaftClientListener.java index b4ee74346ee98..bfdafe43bb503 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/MockRaftClientListener.java +++ b/metadata/src/test/java/org/apache/kafka/controller/MockRaftClientListener.java @@ -54,7 +54,7 @@ public synchronized void handleCommit(BatchReader reader) for (ApiMessageAndVersion messageAndVersion : batch.records()) { ApiMessage message = messageAndVersion.message(); - serializedEvents.add(COMMIT + " " + message.toString()); + serializedEvents.add(COMMIT + " " + message); } serializedEvents.add(LAST_COMMITTED_OFFSET + " " + lastCommittedOffset); } @@ -72,7 +72,7 @@ public synchronized void handleLoadSnapshot(SnapshotReader for (ApiMessageAndVersion messageAndVersion : batch.records()) { ApiMessage message = messageAndVersion.message(); - serializedEvents.add(SNAPSHOT + " " + message.toString()); + serializedEvents.add(SNAPSHOT + " " + message); } 
serializedEvents.add(LAST_COMMITTED_OFFSET + " " + lastCommittedOffset); } diff --git a/raft/src/main/java/org/apache/kafka/raft/DynamicVoters.java b/raft/src/main/java/org/apache/kafka/raft/DynamicVoters.java index 8f746ff2c48be..c862fd404e469 100644 --- a/raft/src/main/java/org/apache/kafka/raft/DynamicVoters.java +++ b/raft/src/main/java/org/apache/kafka/raft/DynamicVoters.java @@ -109,7 +109,7 @@ public String toString() { for (DynamicVoter voter : voters.values()) { builder.append(prefix); prefix = ","; - builder.append(voter.toString()); + builder.append(voter); } return builder.toString(); } diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java index c3ecb2c20c6c2..b448332358f79 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java @@ -478,7 +478,7 @@ public void testInvalidVoterReplicaPreVoteRequest(KRaftVersion kraftVersion) thr // invalid voter id is rejected context.deliverRequest( context.voteRequest( - context.clusterId.toString(), + context.clusterId, epoch, otherNodeKey, ReplicaKey.of(10, Uuid.randomUuid()), @@ -493,7 +493,7 @@ public void testInvalidVoterReplicaPreVoteRequest(KRaftVersion kraftVersion) thr // invalid voter directory id is rejected context.deliverRequest( context.voteRequest( - context.clusterId.toString(), + context.clusterId, epoch, otherNodeKey, ReplicaKey.of(0, Uuid.randomUuid()), diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java index fd696458b80aa..8112e5c9f4b74 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java @@ -838,7 +838,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchSnapshotRequestFromMajo int epoch = context.currentEpoch(); FetchSnapshotRequestData voter1FetchSnapshotRequest = fetchSnapshotRequest( - context.clusterId.toString(), + context.clusterId, voter1, context.metadataPartition, epoch, @@ -848,7 +848,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchSnapshotRequestFromMajo ); FetchSnapshotRequestData voter2FetchSnapshotRequest = fetchSnapshotRequest( - context.clusterId.toString(), + context.clusterId, voter2, context.metadataPartition, epoch, @@ -858,7 +858,7 @@ public void testLeaderShouldResignLeadershipIfNotGetFetchSnapshotRequestFromMajo ); FetchSnapshotRequestData observerFetchSnapshotRequest = fetchSnapshotRequest( - context.clusterId.toString(), + context.clusterId, observer3, context.metadataPartition, epoch, @@ -1867,7 +1867,7 @@ public void testFetchSnapshotRequestClusterIdValidation( // valid cluster id is accepted context.deliverRequest( fetchSnapshotRequest( - context.clusterId.toString(), + context.clusterId, otherNode, context.metadataPartition, epoch, diff --git a/server/src/main/java/org/apache/kafka/server/purgatory/DeleteRecordsPartitionStatus.java b/server/src/main/java/org/apache/kafka/server/purgatory/DeleteRecordsPartitionStatus.java index f26cce7fcd4ca..17e0b4bcf3774 100644 --- a/server/src/main/java/org/apache/kafka/server/purgatory/DeleteRecordsPartitionStatus.java +++ b/server/src/main/java/org/apache/kafka/server/purgatory/DeleteRecordsPartitionStatus.java @@ -50,7 +50,7 @@ public long requiredOffset() { @Override public String toString() { 
return String.format("[acksPending: %b, error: %s, lowWatermark: %d, requiredOffset: %d]", - acksPending, Errors.forCode(responseStatus.errorCode()).toString(), responseStatus.lowWatermark(), + acksPending, Errors.forCode(responseStatus.errorCode()), responseStatus.lowWatermark(), requiredOffset); } diff --git a/server/src/main/java/org/apache/kafka/server/quota/ClientQuotaManager.java b/server/src/main/java/org/apache/kafka/server/quota/ClientQuotaManager.java index c91f6bd299a0d..f6de69ce81ef9 100644 --- a/server/src/main/java/org/apache/kafka/server/quota/ClientQuotaManager.java +++ b/server/src/main/java/org/apache/kafka/server/quota/ClientQuotaManager.java @@ -482,11 +482,11 @@ private String metricTagsToSensorSuffix(Map metricTags) { } private String getThrottleTimeSensorName(Map metricTags) { - return quotaType.toString() + "ThrottleTime-" + metricTagsToSensorSuffix(metricTags); + return quotaType + "ThrottleTime-" + metricTagsToSensorSuffix(metricTags); } private String getQuotaSensorName(Map metricTags) { - return quotaType.toString() + "-" + metricTagsToSensorSuffix(metricTags); + return quotaType + "-" + metricTagsToSensorSuffix(metricTags); } protected MetricConfig getQuotaMetricConfig(Map metricTags) { diff --git a/server/src/main/java/org/apache/kafka/server/share/fetch/InFlightState.java b/server/src/main/java/org/apache/kafka/server/share/fetch/InFlightState.java index 1dcdb52c90a5e..3c2d6f093d4ac 100644 --- a/server/src/main/java/org/apache/kafka/server/share/fetch/InFlightState.java +++ b/server/src/main/java/org/apache/kafka/server/share/fetch/InFlightState.java @@ -273,7 +273,7 @@ public boolean equals(Object o) { @Override public String toString() { return "InFlightState(" + - "state=" + state.toString() + + "state=" + state + ", deliveryCount=" + deliveryCount + ", memberId=" + memberId + ")"; diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/serialization/RemoteLogMetadataSerde.java b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/serialization/RemoteLogMetadataSerde.java index 3b8efbabace70..071748f9efd22 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/serialization/RemoteLogMetadataSerde.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/serialization/RemoteLogMetadataSerde.java @@ -114,7 +114,7 @@ public void writeTo(ConsumerRecord consumerRecord, PrintStream o output.printf("partition: %d, offset: %d, value: %s%n", consumerRecord.partition(), consumerRecord.offset(), - remoteLogMetadataSerde.deserialize(consumerRecord.value()).toString()); + remoteLogMetadataSerde.deserialize(consumerRecord.value())); } } } diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/CleanShutdownFileHandler.java b/storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/CleanShutdownFileHandler.java index 4f476f3a0557b..6bd43d85b09b0 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/CleanShutdownFileHandler.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/CleanShutdownFileHandler.java @@ -109,6 +109,6 @@ public boolean exists() { @Override public String toString() { - return "CleanShutdownFile=(" + "file=" + cleanShutdownFile.toString() + ')'; + return "CleanShutdownFile=(" + "file=" + cleanShutdownFile + ')'; } } \ No newline at end of file diff --git a/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteIndexCache.java 
b/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteIndexCache.java index 325080fb0a894..e395ea85af89e 100644 --- a/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteIndexCache.java +++ b/storage/src/main/java/org/apache/kafka/storage/internals/log/RemoteIndexCache.java @@ -670,7 +670,7 @@ private static String generateFileNamePrefixForIndex(RemoteLogSegmentMetadata re long startOffset = remoteLogSegmentMetadata.startOffset(); Uuid segmentId = remoteLogSegmentMetadata.remoteLogSegmentId().id(); // uuid.toString uses URL encoding which is safe for filenames and URLs. - return startOffset + "_" + segmentId.toString(); + return startOffset + "_" + segmentId; } static File remoteOffsetIndexFile(File dir, RemoteLogSegmentMetadata remoteLogSegmentMetadata) { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/StrictBufferConfigImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/StrictBufferConfigImpl.java index 34a9ce69ac5e0..0ac6109269e31 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/StrictBufferConfigImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/suppress/StrictBufferConfigImpl.java @@ -119,7 +119,7 @@ public String toString() { return "StrictBufferConfigImpl{maxKeys=" + maxRecords + ", maxBytes=" + maxBytes + ", bufferFullStrategy=" + bufferFullStrategy + - ", logConfig=" + logConfig().toString() + + ", logConfig=" + logConfig() + '}'; } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ClientUtils.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ClientUtils.java index 87a25e65b6212..8c5b246d7addb 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ClientUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ClientUtils.java @@ -161,11 +161,11 @@ public static Map getEndOffsets(final Lis result.put(partition, future.get()); } catch (final ExecutionException e) { final Throwable cause = e.getCause(); - final String msg = String.format("Error while attempting to read end offsets for partition '%s'", partition.toString()); + final String msg = String.format("Error while attempting to read end offsets for partition '%s'", partition); throw new StreamsException(msg, cause); } catch (final InterruptedException e) { Thread.interrupted(); - final String msg = String.format("Interrupted while attempting to read end offsets for partition '%s'", partition.toString()); + final String msg = String.format("Interrupted while attempting to read end offsets for partition '%s'", partition); throw new StreamsException(msg, e); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java index 2503a35841963..d70970f4ee2cf 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java @@ -1347,7 +1347,7 @@ public AutoOffsetResetStrategy offsetResetStrategy(final String topic) { latestResetPatterns.stream().anyMatch(p -> p.matcher(topic).matches())) { return AutoOffsetResetStrategy.LATEST; } else if (maybeDecorateInternalSourceTopics(durationResetTopics.keySet()).contains(topic)) { - return 
AutoOffsetResetStrategy.fromString("by_duration:" + durationResetTopics.get(topic).toString()); + return AutoOffsetResetStrategy.fromString("by_duration:" + durationResetTopics.get(topic)); } else if ((resetDuration = findDuration(topic)).isPresent()) { return AutoOffsetResetStrategy.fromString("by_duration:" + resetDuration.get()); } else if (containsTopic(topic)) { @@ -1719,8 +1719,8 @@ public TopologyDescription.Processor processor() { @Override public String toString() { return "Sub-topology: " + id + " for global store (will not generate tasks)\n" - + " " + source.toString() + "\n" - + " " + processor.toString() + "\n"; + + " " + source + "\n" + + " " + processor + "\n"; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/QuickUnion.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/QuickUnion.java index f91bdcce50ae1..8adc8cbc86fd5 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/QuickUnion.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/QuickUnion.java @@ -39,7 +39,7 @@ public T root(final T id) { T parent = ids.get(current); if (parent == null) { - throw new NoSuchElementException("id: " + id.toString()); + throw new NoSuchElementException("id: " + id); } while (!parent.equals(current)) { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java index c4178733ef319..e6ee03665350b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java @@ -465,7 +465,7 @@ private void recordSendError(final String topic, final ProducerRecord serializedRecord, final InternalProcessorContext context, final String processorNodeId) { - String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, productionException.toString()); + String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, productionException); if (isFatalException(productionException)) { errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since this is a fatal error."; diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StampedRecord.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StampedRecord.java index dd0a1298b6767..23c56b64f7708 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StampedRecord.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StampedRecord.java @@ -79,7 +79,7 @@ public byte[] rawValue() { @Override public String toString() { - return value.toString() + ", timestamp = " + timestamp; + return value + ", timestamp = " + timestamp; } @Override diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java index 42b57e46aa4f3..6350aa8c91f48 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamTask.java @@ -1045,7 +1045,7 @@ private void resetOffsetsIfNeededAndInitializeMetadata(final java.util.function. 
log.warn( "Encountered {} while trying to fetch committed offsets, will retry initializing the metadata in the next loop." + "\nConsider overwriting consumer config {} to a larger value to avoid timeout errors", - timeoutException.toString(), + timeoutException, ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); // re-throw to trigger `task.timeout.ms` diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java index fe04f2c4613f2..f57a92d2eced7 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java @@ -490,7 +490,7 @@ public String topologyDescriptionString() { } final StringBuilder sb = new StringBuilder(); - applyToEachBuilder(b -> sb.append(b.describe().toString())); + applyToEachBuilder(b -> sb.append(b.describe())); return sb.toString(); } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStore.java index e05b6328ec8b3..bbf925863f5e8 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStore.java @@ -216,13 +216,13 @@ public byte[] get(final Bytes rawKey) { if (baseKeySchema instanceof PrefixedWindowKeySchemas.TimeFirstWindowKeySchema) { if (timestampFromRawKey < observedStreamTime - retentionPeriod) { LOG.debug("Record with key {} is expired as timestamp from key ({}) < actual stream time ({})", - rawKey.toString(), timestampFromRawKey, observedStreamTime - retentionPeriod); + rawKey, timestampFromRawKey, observedStreamTime - retentionPeriod); return null; } } else { if (timestampFromRawKey < observedStreamTime - retentionPeriod + 1) { LOG.debug("Record with key {} is expired as timestamp from key ({}) < actual stream time ({})", - rawKey.toString(), timestampFromRawKey, observedStreamTime - retentionPeriod + 1); + rawKey, timestampFromRawKey, observedStreamTime - retentionPeriod + 1); return null; } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStore.java index bde8d8319197d..8394a7e5e595e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStore.java @@ -95,7 +95,7 @@ KeyValueIterator fetch(final Bytes key, final long actualFrom = getActualFrom(from); if (keySchema instanceof WindowKeySchema && to < actualFrom) { - LOG.debug("Returning no records for key {} as to ({}) < actualFrom ({}) ", key.toString(), to, actualFrom); + LOG.debug("Returning no records for key {} as to ({}) < actualFrom ({}) ", key, to, actualFrom); return KeyValueIterators.emptyIterator(); } @@ -275,7 +275,7 @@ public byte[] get(final Bytes key) { // check if timestamp is expired if (timestampFromKey < observedStreamTime - retentionPeriod + 1) { LOG.debug("Record with key {} is expired as timestamp from key ({}) < actual stream time ({})", - key.toString(), 
timestampFromKey, observedStreamTime - retentionPeriod + 1); + key, timestampFromKey, observedStreamTime - retentionPeriod + 1); return null; } final S segment = segments.segmentForTimestamp(timestampFromKey); diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java index ede618237cf35..6bcb9cb900c6e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBStore.java @@ -313,7 +313,7 @@ protected List openRocksDB(final DBOptions dbOptions, return mergeColumnFamilyHandleLists(existingColumnFamilies, createdColumnFamilies, allDescriptors); } catch (final RocksDBException e) { - throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir.toString(), e); + throw new ProcessorStateException("Error opening store " + name + " at location " + dbDir, e); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecordingTrigger.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecordingTrigger.java index d93f985692534..d732128adc1ed 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecordingTrigger.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/metrics/RocksDBMetricsRecordingTrigger.java @@ -34,7 +34,7 @@ public void addMetricsRecorder(final RocksDBMetricsRecorder metricsRecorder) { final String metricsRecorderName = metricsRecorderName(metricsRecorder); if (metricsRecordersToTrigger.containsKey(metricsRecorderName)) { throw new IllegalStateException("RocksDB metrics recorder for store \"" + metricsRecorder.storeName() + - "\" of task " + metricsRecorder.taskId().toString() + " has already been added. " + "\" of task " + metricsRecorder.taskId() + " has already been added. 
" + "This is a bug in Kafka Streams."); } metricsRecordersToTrigger.put(metricsRecorderName, metricsRecorder); @@ -51,7 +51,7 @@ public void removeMetricsRecorder(final RocksDBMetricsRecorder metricsRecorder) } private String metricsRecorderName(final RocksDBMetricsRecorder metricsRecorder) { - return metricsRecorder.taskId().toString() + "-" + metricsRecorder.storeName(); + return metricsRecorder.taskId() + "-" + metricsRecorder.storeName(); } @Override diff --git a/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java b/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java index ae16d11d8edea..23877ab26786c 100644 --- a/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java +++ b/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java @@ -246,7 +246,7 @@ public Map fetchOffsets(GetOffsetShellOptions options) thr partitionInfo = listOffsetsResult.partitionResult(partition).get(); } catch (ExecutionException e) { if (e.getCause() instanceof KafkaException) { - System.err.println("Skip getting offsets for topic-partition " + partition.toString() + " due to error: " + e.getMessage()); + System.err.println("Skip getting offsets for topic-partition " + partition + " due to error: " + e.getMessage()); } else { throw e; } diff --git a/tools/src/main/java/org/apache/kafka/tools/JmxTool.java b/tools/src/main/java/org/apache/kafka/tools/JmxTool.java index cc6f310d736a0..a224984e46cdb 100644 --- a/tools/src/main/java/org/apache/kafka/tools/JmxTool.java +++ b/tools/src/main/java/org/apache/kafka/tools/JmxTool.java @@ -251,11 +251,11 @@ private static Map queryAttributes(MBeanServerConnection conn, for (Attribute attribute : attributes.asList()) { if (attributesInclude.isPresent()) { if (List.of(attributesInclude.get()).contains(attribute.getName())) { - result.put(String.format("%s:%s", objectName.toString(), attribute.getName()), + result.put(String.format("%s:%s", objectName, attribute.getName()), attribute.getValue()); } } else { - result.put(String.format("%s:%s", objectName.toString(), attribute.getName()), + result.put(String.format("%s:%s", objectName, attribute.getName()), attribute.getValue()); } } diff --git a/tools/src/main/java/org/apache/kafka/tools/LogCompactionTester.java b/tools/src/main/java/org/apache/kafka/tools/LogCompactionTester.java index fb02368396810..0c525ec35f91b 100644 --- a/tools/src/main/java/org/apache/kafka/tools/LogCompactionTester.java +++ b/tools/src/main/java/org/apache/kafka/tools/LogCompactionTester.java @@ -370,7 +370,7 @@ private static BufferedReader externalSort(File file) throws IOException { ProcessBuilder builder = new ProcessBuilder( "sort", "--key=1,2", "--stable", "--buffer-size=20%", - "--temporary-directory=" + tempDir.toString(), file.getAbsolutePath()); + "--temporary-directory=" + tempDir, file.getAbsolutePath()); builder.redirectError(ProcessBuilder.Redirect.INHERIT); Process process; diff --git a/tools/src/main/java/org/apache/kafka/tools/MetadataQuorumCommand.java b/tools/src/main/java/org/apache/kafka/tools/MetadataQuorumCommand.java index e42dcbb0e09c0..a223dc8c8ec6f 100644 --- a/tools/src/main/java/org/apache/kafka/tools/MetadataQuorumCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/MetadataQuorumCommand.java @@ -323,7 +323,7 @@ public String toString() { sb.append(", \"endpoints\": ["); for (RaftVoterEndpoint endpoint : endpoints) { sb.append("\""); - sb.append(endpoint.toString()).append("\", "); + sb.append(endpoint).append("\", "); } sb.setLength(sb.length() - 2); // remove the last comma and 
space sb.append("]"); diff --git a/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java b/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java index fa21432b5870a..924d00d3383cd 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java +++ b/tools/src/main/java/org/apache/kafka/tools/ProducerPerformance.java @@ -648,7 +648,7 @@ public ConfigPostProcessor(ArgumentParser parser, String[] args) throws IOExcept Optional txIdInProps = Optional.ofNullable(producerProps.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG)) .map(Object::toString); - String transactionId = Optional.ofNullable(transactionIdArg).orElse(txIdInProps.orElse(DEFAULT_TRANSACTION_ID_PREFIX + Uuid.randomUuid().toString())); + String transactionId = Optional.ofNullable(transactionIdArg).orElse(txIdInProps.orElse(DEFAULT_TRANSACTION_ID_PREFIX + Uuid.randomUuid())); producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionId); if (transactionDurationMsArg == null) { diff --git a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ShareGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ShareGroupCommand.java index 50565dcc0ba02..ac18589d2fd2a 100644 --- a/tools/src/main/java/org/apache/kafka/tools/consumer/group/ShareGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/consumer/group/ShareGroupCommand.java @@ -567,12 +567,12 @@ private void printStates(Map descriptions, boolea if (verbose) { String fmt = "\n%" + -groupLen + "s %" + -coordinatorLen + "s %-15s %-12s %-17s %s"; System.out.printf(fmt, "GROUP", "COORDINATOR (ID)", "STATE", "GROUP-EPOCH", "ASSIGNMENT-EPOCH", "#MEMBERS"); - System.out.printf(fmt, groupId, coordinator, description.groupState().toString(), + System.out.printf(fmt, groupId, coordinator, description.groupState(), description.groupEpoch(), description.targetAssignmentEpoch(), description.members().size()); } else { String fmt = "\n%" + -groupLen + "s %" + -coordinatorLen + "s %-15s %s"; System.out.printf(fmt, "GROUP", "COORDINATOR (ID)", "STATE", "#MEMBERS"); - System.out.printf(fmt, groupId, coordinator, description.groupState().toString(), description.members().size()); + System.out.printf(fmt, groupId, coordinator, description.groupState(), description.members().size()); } System.out.println(); }); diff --git a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java index 0c54f6c53f99d..0837e99102906 100644 --- a/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/streams/StreamsGroupCommand.java @@ -361,12 +361,12 @@ private void printStates(StreamsGroupDescription description, boolean verbose) { if (!verbose) { String fmt = "%" + -groupLen + "s %" + -coordinatorLen + "s %" + -stateLen + "s %s\n"; System.out.printf(fmt, "GROUP", "COORDINATOR (ID)", "STATE", "#MEMBERS"); - System.out.printf(fmt, description.groupId(), coordinator, description.groupState().toString(), description.members().size()); + System.out.printf(fmt, description.groupId(), coordinator, description.groupState(), description.members().size()); } else { final int groupEpochLen = 15, targetAssignmentEpochLen = 25; String fmt = "%" + -groupLen + "s %" + -coordinatorLen + "s %" + -stateLen + "s %" + -groupEpochLen + "s %" + -targetAssignmentEpochLen + "s %s\n"; System.out.printf(fmt, "GROUP", "COORDINATOR (ID)", "STATE", "GROUP-EPOCH", "TARGET-ASSIGNMENT-EPOCH", 
"#MEMBERS"); - System.out.printf(fmt, description.groupId(), coordinator, description.groupState().toString(), description.groupEpoch(), description.targetAssignmentEpoch(), description.members().size()); + System.out.printf(fmt, description.groupId(), coordinator, description.groupState(), description.groupEpoch(), description.targetAssignmentEpoch(), description.members().size()); } } diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/workload/ExternalCommandWorker.java b/trogdor/src/main/java/org/apache/kafka/trogdor/workload/ExternalCommandWorker.java index 85c53ca97a9e9..0b9b53564eb39 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/workload/ExternalCommandWorker.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/workload/ExternalCommandWorker.java @@ -208,7 +208,7 @@ public void run() { log.trace("{}: read line from stdin: {}", id, line); JsonNode resp = readObject(line); if (resp.has("status")) { - log.info("{}: New status: {}", id, resp.get("status").toString()); + log.info("{}: New status: {}", id, resp.get("status")); status.update(resp.get("status")); } if (resp.has("log")) {