diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0f6cea60a25b9..09a6b9d9dc0e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -139,8 +139,9 @@ jobs: # --info: For now, we'll generate lots of logs while setting up the GH Actions # --scan: Publish the build scan. This will only work on PRs from apache/kafka and trunk # --no-scan: For public fork PRs, we won't attempt to publish the scan - run: | - ./gradlew --build-cache --info $SCAN_ARG check releaseTarGz -x test + run: ./gradlew --build-cache --info $SCAN_ARG check releaseTarGz -x test + - name: Sanity Check + run: ./gradlew --build-cache rewriteDryRun - name: Archive check reports if: always() uses: actions/upload-artifact@v4 diff --git a/README.md b/README.md index 49d1f9fd5cf50..98d0fee7c5344 100644 --- a/README.md +++ b/README.md @@ -211,6 +211,18 @@ The import order is a part of static check. please call `spotlessApply` to optim ./gradlew spotlessApply +#### Rewrite +The build system incorporates [OpenRewrite](https://docs.openrewrite.org/) (the open-source engine behind [Moderne](https://moderne.io/)) for automated code transformations. Recipes can: + +- Enforce **conventions** (e.g., JUnit naming rules) +- **Refactor** safely (e.g., rename methods, migrate APIs) +- **Modernize** code (e.g., Java 8 → Java 17 features) +- Replace discouraged **patterns** (e.g., `Vector` with `ArrayList`) + +To preview the changes the active recipes would make, without modifying any sources, run: + + ./gradlew rewriteDryRun + #### Spotbugs #### Spotbugs uses static analysis to look for bugs in the code. You can run spotbugs using: diff --git a/build.gradle b/build.gradle index 5b6714f882294..eb19e4eaf466e 100644 --- a/build.gradle +++ b/build.gradle @@ -29,20 +29,22 @@ buildscript { } plugins { - id 'com.github.ben-manes.versions' version '0.53.0' id 'idea' id 'jacoco' id 'java-library' + id 'com.diffplug.spotless' version '8.1.0' + id 'com.github.ben-manes.versions' version '0.53.0' + id 'com.github.spotbugs' version '6.4.4' apply false + id 'com.gradleup.shadow' version '8.3.9' apply false + id 'io.swagger.core.v3.swagger-gradle-plugin' version "${swaggerVersion}" + id 'org.nosphere.apache.rat' version '0.8.1' + id 'org.openrewrite.rewrite' version '7.22.0' apply false id 'org.owasp.dependencycheck' version '12.1.8' - id 'org.nosphere.apache.rat' version "0.8.1" - id "io.swagger.core.v3.swagger-gradle-plugin" version "${swaggerVersion}" - - id "com.github.spotbugs" version '6.4.4' apply false id 'org.scoverage' version '8.1' apply false - id 'com.gradleup.shadow' version '8.3.9' apply false - id 'com.diffplug.spotless' version "8.0.0" } +apply from: "$rootDir/gradle/rewrite.gradle" + ext { minClientJavaVersion = 11 minNonClientJavaVersion = 17 diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java index 33dcc117f2bf3..3c64bb958e59b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java @@ -88,12 +88,6 @@ public static ShareAcknowledgeResponse parse(Readable readable, short version) { ); } - private static boolean matchingTopic(ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse previousTopic, TopicIdPartition currentTopic) { - if (previousTopic == null) - return false; - return previousTopic.topicId().equals(currentTopic.topicId()); - } - public static ShareAcknowledgeResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { return
partitionResponse(topicIdPartition.topicPartition().partition(), error); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java index 4cd9d613095c8..542bf37a1a278 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteRecordsHandlerTest.java @@ -61,14 +61,14 @@ public class DeleteRecordsHandlerTest { private final TopicPartition t0p3 = new TopicPartition("t0", 3); private final Node node1 = new Node(1, "host", 1234); private final Node node2 = new Node(2, "host", 1235); - private final Map recordsToDelete = new HashMap<>() { - { - put(t0p0, RecordsToDelete.beforeOffset(10L)); - put(t0p1, RecordsToDelete.beforeOffset(10L)); - put(t0p2, RecordsToDelete.beforeOffset(10L)); - put(t0p3, RecordsToDelete.beforeOffset(10L)); - } - }; + private final Map recordsToDelete; + { + recordsToDelete = new HashMap<>(); + recordsToDelete.put(t0p0, RecordsToDelete.beforeOffset(10L)); + recordsToDelete.put(t0p1, RecordsToDelete.beforeOffset(10L)); + recordsToDelete.put(t0p2, RecordsToDelete.beforeOffset(10L)); + recordsToDelete.put(t0p3, RecordsToDelete.beforeOffset(10L)); + } @Test public void testBuildRequestSimple() { diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java index a7156554001aa..ba8bdb03ca082 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandlerTest.java @@ -65,16 +65,16 @@ public final class ListOffsetsHandlerTest { private final Node node = new Node(1, "host", 1234); - private final Map offsetTimestampsByPartition = new HashMap<>() { - { - put(t0p0, ListOffsetsRequest.LATEST_TIMESTAMP); - put(t0p1, ListOffsetsRequest.EARLIEST_TIMESTAMP); - put(t1p0, 123L); - put(t1p1, ListOffsetsRequest.MAX_TIMESTAMP); - put(t2p0, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP); - put(t2p1, ListOffsetsRequest.LATEST_TIERED_TIMESTAMP); - } - }; + private final Map offsetTimestampsByPartition; + { + offsetTimestampsByPartition = new HashMap<>(); + offsetTimestampsByPartition.put(t0p0, ListOffsetsRequest.LATEST_TIMESTAMP); + offsetTimestampsByPartition.put(t0p1, ListOffsetsRequest.EARLIEST_TIMESTAMP); + offsetTimestampsByPartition.put(t1p0, 123L); + offsetTimestampsByPartition.put(t1p1, ListOffsetsRequest.MAX_TIMESTAMP); + offsetTimestampsByPartition.put(t2p0, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP); + offsetTimestampsByPartition.put(t2p1, ListOffsetsRequest.LATEST_TIERED_TIMESTAMP); + } @Test public void testBuildRequestSimple() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index f8e53148b0100..8a03a1ef5f3b5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -159,11 +159,11 @@ public class FetchRequestManagerTest { private final String topicName = "test"; private final String groupId = "test-group"; private final Uuid topicId = Uuid.randomUuid(); - 
private final Map topicIds = new HashMap<>() { - { - put(topicName, topicId); - } - }; + private final Map topicIds; + { + topicIds = new HashMap<>(); + topicIds.put(topicName, topicId); + } private final Map topicNames = singletonMap(topicId, topicName); private final String metricGroup = "consumer" + groupId + "-fetch-manager-metrics"; private final TopicPartition tp0 = new TopicPartition(topicName, 0); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index 93da0a4433af0..6699d8ea5b803 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -155,11 +155,11 @@ public class FetcherTest { private final String topicName = "test"; private final String groupId = "test-group"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap<>() { - { - put(topicName, topicId); - } - }; + private final Map topicIds; + { + topicIds = new HashMap<>(); + topicIds.put(topicName, topicId); + } private final Map topicNames = singletonMap(topicId, topicName); private final String metricGroup = "consumer" + groupId + "-fetch-manager-metrics"; private final TopicPartition tp0 = new TopicPartition(topicName, 0); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java index 182900c0207ac..1655eb39016df 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java @@ -101,11 +101,11 @@ public class OffsetFetcherTest { private final String topicName = "test"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap<>() { - { - put(topicName, topicId); - } - }; + private final Map topicIds; + { + topicIds = new HashMap<>(); + topicIds.put(topicName, topicId); + } private final TopicPartition tp0 = new TopicPartition(topicName, 0); private final TopicPartition tp1 = new TopicPartition(topicName, 1); private final TopicPartition tp2 = new TopicPartition(topicName, 2); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index b6040950ccdd6..1cf01a6a4bc1e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -142,12 +142,12 @@ public class ShareConsumeRequestManagerTest { put(topicName2, topicId2); } }; - private final Map topicPartitionCounts = new HashMap<>() { - { - put(topicName, 2); - put(topicName2, 1); - } - }; + private final Map topicPartitionCounts; + { + topicPartitionCounts = new HashMap<>(); + topicPartitionCounts.put(topicName, 2); + topicPartitionCounts.put(topicName2, 1); + } private final TopicPartition tp0 = new TopicPartition(topicName, 0); private final TopicIdPartition tip0 = new TopicIdPartition(topicId, tp0); private final TopicPartition tp1 = new TopicPartition(topicName, 1); diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java index ce2aa86e3e26e..16d6924f0e0da 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataFetcherTest.java @@ -58,11 +58,11 @@ public class TopicMetadataFetcherTest { private final String topicName = "test"; private final Uuid topicId = Uuid.randomUuid(); - private final Map topicIds = new HashMap<>() { - { - put(topicName, topicId); - } - }; + private final Map topicIds; + { + topicIds = new HashMap<>(); + topicIds.put(topicName, topicId); + } private final TopicPartition tp0 = new TopicPartition(topicName, 0); private final int validLeaderEpoch = 0; private final MetadataResponse initialUpdateResponse = diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 5d15229a838a6..cb402b404943f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -250,11 +250,10 @@ public void testOverwriteAcksAndRetriesForIdempotentProducers() { public void testAcksAndIdempotenceForIdempotentProducers() { Properties baseProps = baseProperties(); - Properties validProps = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "0"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - }}; + Properties validProps = new Properties(); + validProps.putAll(baseProps); + validProps.setProperty(ProducerConfig.ACKS_CONFIG, "0"); + validProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); ProducerConfig config = new ProducerConfig(validProps); assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -264,10 +263,9 @@ public void testAcksAndIdempotenceForIdempotentProducers() { config.getString(ProducerConfig.ACKS_CONFIG), "acks should be overwritten"); - Properties validProps2 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties validProps2 = new Properties(); + validProps2.putAll(baseProps); + validProps2.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); config = new ProducerConfig(validProps2); assertTrue( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -277,11 +275,10 @@ public void testAcksAndIdempotenceForIdempotentProducers() { config.getString(ProducerConfig.ACKS_CONFIG), "acks should be set with the default value"); - Properties validProps3 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "all"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - }}; + Properties validProps3 = new Properties(); + validProps3.putAll(baseProps); + validProps3.setProperty(ProducerConfig.ACKS_CONFIG, "all"); + validProps3.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); config = new ProducerConfig(validProps3); assertFalse(config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), "idempotence should be overwritten"); @@ -290,10 +287,9 @@ public void testAcksAndIdempotenceForIdempotentProducers() { config.getString(ProducerConfig.ACKS_CONFIG), "acks should be overwritten"); - Properties 
validProps4 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "0"); - }}; + Properties validProps4 = new Properties(); + validProps4.putAll(baseProps); + validProps4.setProperty(ProducerConfig.ACKS_CONFIG, "0"); config = new ProducerConfig(validProps4); assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -303,10 +299,9 @@ public void testAcksAndIdempotenceForIdempotentProducers() { config.getString(ProducerConfig.ACKS_CONFIG), "acks should be set with overridden value"); - Properties validProps5 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "1"); - }}; + Properties validProps5 = new Properties(); + validProps5.putAll(baseProps); + validProps5.setProperty(ProducerConfig.ACKS_CONFIG, "1"); config = new ProducerConfig(validProps5); assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -316,33 +311,30 @@ public void testAcksAndIdempotenceForIdempotentProducers() { config.getString(ProducerConfig.ACKS_CONFIG), "acks should be set with overridden value"); - Properties invalidProps = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "0"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties invalidProps = new Properties(); + invalidProps.putAll(baseProps); + invalidProps.setProperty(ProducerConfig.ACKS_CONFIG, "0"); + invalidProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); + invalidProps.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps), "Cannot set a transactional.id without also enabling idempotence"); - Properties invalidProps2 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "1"); - // explicitly enabling idempotence should still throw exception - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); - }}; + Properties invalidProps2 = new Properties(); + invalidProps2.putAll(baseProps); + invalidProps2.setProperty(ProducerConfig.ACKS_CONFIG, "1"); + // explicitly enabling idempotence should still throw exception + invalidProps2.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps2), "Must set acks to all in order to use the idempotent producer"); - Properties invalidProps3 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.ACKS_CONFIG, "0"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties invalidProps3 = new Properties(); + invalidProps3.putAll(baseProps); + invalidProps3.setProperty(ProducerConfig.ACKS_CONFIG, "0"); + invalidProps3.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps3), @@ -353,11 +345,10 @@ public void testAcksAndIdempotenceForIdempotentProducers() { public void testRetriesAndIdempotenceForIdempotentProducers() { Properties baseProps = baseProperties(); - Properties validProps = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.RETRIES_CONFIG, "0"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - }}; + Properties validProps = new Properties(); + validProps.putAll(baseProps); + validProps.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); + 
validProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); ProducerConfig config = new ProducerConfig(validProps); assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -367,10 +358,9 @@ public void testRetriesAndIdempotenceForIdempotentProducers() { config.getInt(ProducerConfig.RETRIES_CONFIG), "retries should be overwritten"); - Properties validProps2 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.RETRIES_CONFIG, "0"); - }}; + Properties validProps2 = new Properties(); + validProps2.putAll(baseProps); + validProps2.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); config = new ProducerConfig(validProps2); assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -380,33 +370,30 @@ public void testRetriesAndIdempotenceForIdempotentProducers() { config.getInt(ProducerConfig.RETRIES_CONFIG), "retries should be set with overridden value"); - Properties invalidProps = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.RETRIES_CONFIG, "0"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties invalidProps = new Properties(); + invalidProps.putAll(baseProps); + invalidProps.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); + invalidProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); + invalidProps.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps), "Cannot set a transactional.id without also enabling idempotence"); - Properties invalidProps2 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.RETRIES_CONFIG, "0"); - // explicitly enabling idempotence should still throw exception - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); - }}; + Properties invalidProps2 = new Properties(); + invalidProps2.putAll(baseProps); + invalidProps2.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); + // explicitly enabling idempotence should still throw exception + invalidProps2.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps2), "Must set retries to non-zero when using the idempotent producer."); - Properties invalidProps3 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.RETRIES_CONFIG, "0"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties invalidProps3 = new Properties(); + invalidProps3.putAll(baseProps); + invalidProps3.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); + invalidProps3.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps3), @@ -425,11 +412,10 @@ private Properties baseProperties() { public void testInflightRequestsAndIdempotenceForIdempotentProducers() { Properties baseProps = baseProperties(); - Properties validProps = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - }}; + Properties validProps = new Properties(); + validProps.putAll(baseProps); + validProps.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); + validProps.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); ProducerConfig config = new ProducerConfig(validProps); 
assertFalse( config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), @@ -439,42 +425,38 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), "max.in.flight.requests.per.connection should be overwritten"); - Properties invalidProps1 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); - }}; + Properties invalidProps1 = new Properties(); + invalidProps1.putAll(baseProps); + invalidProps1.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(invalidProps1)); assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + - " must be set to at most 5. Current value is 6.", configException.getMessage()); - - Properties invalidProps2 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + " must be set to at most 5. Current value is 6.", configException.getMessage()); + + Properties invalidProps2 = new Properties(); + invalidProps2.putAll(baseProps); + invalidProps2.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); + invalidProps2.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); + invalidProps2.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps2), "Cannot set a transactional.id without also enabling idempotence"); - Properties invalidProps3 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); - // explicitly enabling idempotence should still throw exception - setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); - }}; + Properties invalidProps3 = new Properties(); + invalidProps3.putAll(baseProps); + invalidProps3.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); + // explicitly enabling idempotence should still throw exception + invalidProps3.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps3), "Must set max.in.flight.requests.per.connection to at most 5 when using the idempotent producer."); - Properties invalidProps4 = new Properties() {{ - putAll(baseProps); - setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); - setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); - }}; + Properties invalidProps4 = new Properties(); + invalidProps4.putAll(baseProps); + invalidProps4.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); + invalidProps4.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); assertThrows( ConfigException.class, () -> new ProducerConfig(invalidProps4), diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java index 52902976a1670..a64fa18691a0b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java @@ -400,19 +400,13 @@ public void 
shouldPublishConsumerGroupOffsetsOnlyAfterCommitIfTransactionsAreEna producer.beginTransaction(); String group1 = "g1"; - Map group1Commit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); - } - }; + Map group1Commit = new HashMap<>(); + group1Commit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); + group1Commit.put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); String group2 = "g2"; - Map group2Commit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(101L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(21L, null)); - } - }; + Map group2Commit = new HashMap<>(); + group2Commit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(101L, null)); + group2Commit.put(new TopicPartition(topic, 1), new OffsetAndMetadata(21L, null)); producer.sendOffsetsToTransaction(group1Commit, new ConsumerGroupMetadata(group1)); producer.sendOffsetsToTransaction(group2Commit, new ConsumerGroupMetadata(group2)); @@ -451,11 +445,8 @@ public void shouldAddOffsetsWhenSendOffsetsToTransactionByGroupMetadata() { assertFalse(producer.sentOffsets()); - Map groupCommit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - } - }; + Map groupCommit = new HashMap<>(); + groupCommit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); producer.sendOffsetsToTransaction(groupCommit, new ConsumerGroupMetadata("groupId")); assertTrue(producer.sentOffsets()); } @@ -468,11 +459,8 @@ public void shouldResetSentOffsetsFlagOnlyWhenBeginningNewTransaction() { assertFalse(producer.sentOffsets()); - Map groupCommit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - } - }; + Map groupCommit = new HashMap<>(); + groupCommit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); producer.sendOffsetsToTransaction(groupCommit, new ConsumerGroupMetadata("groupId")); producer.commitTransaction(); // commit should not reset "sentOffsets" flag assertTrue(producer.sentOffsets()); @@ -495,18 +483,12 @@ public void shouldPublishLatestAndCumulativeConsumerGroupOffsetsOnlyAfterCommitI producer.beginTransaction(); String group = "g"; - Map groupCommit1 = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); - } - }; - Map groupCommit2 = new HashMap<>() { - { - put(new TopicPartition(topic, 1), new OffsetAndMetadata(101L, null)); - put(new TopicPartition(topic, 2), new OffsetAndMetadata(21L, null)); - } - }; + Map groupCommit1 = new HashMap<>(); + groupCommit1.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); + groupCommit1.put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); + Map groupCommit2 = new HashMap<>(); + groupCommit2.put(new TopicPartition(topic, 1), new OffsetAndMetadata(101L, null)); + groupCommit2.put(new TopicPartition(topic, 2), new OffsetAndMetadata(21L, null)); producer.sendOffsetsToTransaction(groupCommit1, new ConsumerGroupMetadata(group)); producer.sendOffsetsToTransaction(groupCommit2, new ConsumerGroupMetadata(group)); @@ -532,12 +514,9 @@ public void shouldDropConsumerGroupOffsetsOnAbortIfTransactionsAreEnabled() { producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new 
OffsetAndMetadata(42L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); - } - }; + Map groupCommit = new HashMap<>(); + groupCommit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); + groupCommit.put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); producer.sendOffsetsToTransaction(groupCommit, new ConsumerGroupMetadata(group)); producer.abortTransaction(); @@ -561,12 +540,9 @@ public void shouldPreserveOffsetsFromCommitByGroupIdOnAbortIfTransactionsAreEnab producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); - } - }; + Map groupCommit = new HashMap<>(); + groupCommit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); + groupCommit.put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); producer.sendOffsetsToTransaction(groupCommit, new ConsumerGroupMetadata(group)); producer.commitTransaction(); @@ -586,24 +562,18 @@ public void shouldPreserveOffsetsFromCommitByGroupMetadataOnAbortIfTransactionsA producer.beginTransaction(); String group = "g"; - Map groupCommit = new HashMap<>() { - { - put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); - put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); - } - }; + Map groupCommit = new HashMap<>(); + groupCommit.put(new TopicPartition(topic, 0), new OffsetAndMetadata(42L, null)); + groupCommit.put(new TopicPartition(topic, 1), new OffsetAndMetadata(73L, null)); producer.sendOffsetsToTransaction(groupCommit, new ConsumerGroupMetadata(group)); producer.commitTransaction(); producer.beginTransaction(); String group2 = "g2"; - Map groupCommit2 = new HashMap<>() { - { - put(new TopicPartition(topic, 2), new OffsetAndMetadata(53L, null)); - put(new TopicPartition(topic, 3), new OffsetAndMetadata(84L, null)); - } - }; + Map groupCommit2 = new HashMap<>(); + groupCommit2.put(new TopicPartition(topic, 2), new OffsetAndMetadata(53L, null)); + groupCommit2.put(new TopicPartition(topic, 3), new OffsetAndMetadata(84L, null)); producer.sendOffsetsToTransaction(groupCommit2, new ConsumerGroupMetadata(group2)); producer.abortTransaction(); diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java index 9a31a63915d3d..e24f2b71b6aa1 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java @@ -38,14 +38,11 @@ class EnvVarConfigProviderTest { @BeforeEach public void setup() { - Map testEnvVars = new HashMap<>() { - { - put("test_var1", "value1"); - put("secret_var2", "value2"); - put("new_var3", "value3"); - put("not_so_secret_var4", "value4"); - } - }; + Map testEnvVars = new HashMap<>(); + testEnvVars.put("test_var1", "value1"); + testEnvVars.put("secret_var2", "value2"); + testEnvVars.put("new_var3", "value3"); + testEnvVars.put("not_so_secret_var4", "value4"); envVarConfigProvider = new EnvVarConfigProvider(testEnvVars); envVarConfigProvider.configure(Collections.singletonMap("", "")); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java 
b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java index 94f4f1fc8c49e..01cd19f2e637f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientTest.java @@ -42,13 +42,13 @@ public class OAuthBearerSaslClientTest { - private static final Map TEST_PROPERTIES = new LinkedHashMap<>() { - { - put("One", "1"); - put("Two", "2"); - put("Three", "3"); - } - }; + private static final Map TEST_PROPERTIES; + static { + TEST_PROPERTIES = new LinkedHashMap<>(); + TEST_PROPERTIES.put("One", "1"); + TEST_PROPERTIES.put("Two", "2"); + TEST_PROPERTIES.put("Three", "3"); + } private SaslExtensions testExtensions = new SaslExtensions(TEST_PROPERTIES); private final String errorMessage = "Error as expected!"; diff --git a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java index 521a0f19415ad..eaff0f367c1d6 100644 --- a/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/serialization/SerializationTest.java @@ -45,24 +45,24 @@ public class SerializationTest { private final String topic = "testTopic"; - private final Map testData = new HashMap<>() { - { - put(String.class, Arrays.asList(null, "my string")); - put(Short.class, Arrays.asList(null, (short) 32767, (short) -32768)); - put(Integer.class, Arrays.asList(null, 423412424, -41243432)); - put(Long.class, Arrays.asList(null, 922337203685477580L, -922337203685477581L)); - put(Float.class, Arrays.asList(null, 5678567.12312f, -5678567.12341f)); - put(Double.class, Arrays.asList(null, 5678567.12312d, -5678567.12341d)); - put(byte[].class, Arrays.asList(null, "my string".getBytes())); - put(ByteBuffer.class, Arrays.asList( - null, - ByteBuffer.wrap("my string".getBytes()), - ByteBuffer.allocate(10).put("my string".getBytes()), - ByteBuffer.allocateDirect(10).put("my string".getBytes()))); - put(Bytes.class, Arrays.asList(null, new Bytes("my string".getBytes()))); - put(UUID.class, Arrays.asList(null, UUID.randomUUID())); - } - }; + private final Map testData; + { + testData = new HashMap<>(); + testData.put(String.class, Arrays.asList(null, "my string")); + testData.put(Short.class, Arrays.asList(null, (short) 32767, (short) -32768)); + testData.put(Integer.class, Arrays.asList(null, 423412424, -41243432)); + testData.put(Long.class, Arrays.asList(null, 922337203685477580L, -922337203685477581L)); + testData.put(Float.class, Arrays.asList(null, 5678567.12312f, -5678567.12341f)); + testData.put(Double.class, Arrays.asList(null, 5678567.12312d, -5678567.12341d)); + testData.put(byte[].class, Arrays.asList(null, "my string".getBytes())); + testData.put(ByteBuffer.class, Arrays.asList( + null, + ByteBuffer.wrap("my string".getBytes()), + ByteBuffer.allocate(10).put("my string".getBytes()), + ByteBuffer.allocateDirect(10).put("my string".getBytes()))); + testData.put(Bytes.class, Arrays.asList(null, new Bytes("my string".getBytes()))); + testData.put(UUID.class, Arrays.asList(null, UUID.randomUUID())); + } private static class DummyClass { } diff --git a/config/sanity.yml b/config/sanity.yml new file mode 100644 index 0000000000000..5a18c1bff7311 --- /dev/null +++ b/config/sanity.yml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software
Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +type: specs.openrewrite.org/v1beta/recipe +name: org.apache.kafka.openrewrite.SanityCheck +displayName: Kafka sanity check +description: Code quality recipe that currently removes double-brace initialization and unused private methods; further static-analysis and Refaster recipes are staged in the commented entries below. +recipeList: + - org.openrewrite.staticanalysis.NoDoubleBraceInitialization + - org.openrewrite.staticanalysis.RemoveUnusedPrivateMethods +# - org.openrewrite.staticanalysis.CommonStaticAnalysis # superset that already includes NoDoubleBraceInitialization +# - org.openrewrite.staticanalysis.EqualsAvoidsNull +# - tech.picnic.errorprone.refasterrules.EqualityRulesRecipes +# - tech.picnic.errorprone.refasterrules.NullRulesRecipes +# - tech.picnic.errorprone.refasterrules.StringRulesRecipes +# - tech.picnic.errorprone.refasterrules.TimeRulesRecipes +--- diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java index 6d1d50f558bab..ada8eb02de332 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java @@ -491,10 +491,9 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter produceMessages(primaryProducer, "test-topic-1"); String backupTopic1 = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); String consumerGroupName = "consumer-group-testOneWayReplicationWithAutoOffsetSync"; - Map consumerProps = new HashMap<>() {{ - put("group.id", consumerGroupName); - put("auto.offset.reset", "earliest"); - }}; + Map consumerProps = new HashMap<>(); + consumerProps.put("group.id", consumerGroupName); + consumerProps.put("auto.offset.reset", "earliest"); // create consumers before starting the connectors, so we don't need to wait for discovery try (Consumer primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java index d376fa8f2dae4..136152612cd9a 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java @@ -151,9 +151,8 @@ public void startClusters() throws Exception {
additionalBackupClusterClientsConfigs.putAll(superUserConfig()); backupWorkerProps.putAll(superUserConfig()); - Map additionalConfig = new HashMap<>(superUserConfig()) {{ - put(FORWARDING_ADMIN_CLASS, FakeForwardingAdminWithLocalMetadata.class.getName()); - }}; + Map additionalConfig = new HashMap<>(superUserConfig()); + additionalConfig.put(FORWARDING_ADMIN_CLASS, FakeForwardingAdminWithLocalMetadata.class.getName()); superUserConfig().forEach((property, value) -> { additionalConfig.put(CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + property, value); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java index f9e47afd0b31f..d6fc3245c826c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java @@ -82,16 +82,18 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class RetryWithToleranceOperatorTest { - private static final Map PROPERTIES = new HashMap<>() {{ - put(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG, Objects.toString(2)); - put(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG, Objects.toString(3000)); - put(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.INFO.toString()); - - // define required properties - put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); - put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); - put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); - }}; + private static final Map PROPERTIES; + static { + PROPERTIES = new HashMap<>(); + PROPERTIES.put(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG, Objects.toString(2)); + PROPERTIES.put(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG, Objects.toString(3000)); + PROPERTIES.put(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.INFO.toString()); + + // define required properties + PROPERTIES.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + PROPERTIES.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); + PROPERTIES.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); + } public static RetryWithToleranceOperator noneOperator() { return genericOperator(ERRORS_RETRY_TIMEOUT_DEFAULT, NONE, new ErrorHandlingMetrics( diff --git a/gradle/rewrite.gradle b/gradle/rewrite.gradle new file mode 100644 index 0000000000000..bad6664d7e3f1 --- /dev/null +++ b/gradle/rewrite.gradle @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +apply plugin: 'org.openrewrite.rewrite' + +dependencies { + rewrite('org.openrewrite.recipe:rewrite-static-analysis:2.23.0') +} + +rewrite { + activeRecipe('org.apache.kafka.openrewrite.SanityCheck') + configFile = rootProject.file('config/sanity.yml') + setExportDatatables(true) + setFailOnDryRunResults(true) +} diff --git a/server-common/src/test/java/org/apache/kafka/metadata/AssignmentsHelperTest.java b/server-common/src/test/java/org/apache/kafka/metadata/AssignmentsHelperTest.java index cfc4e5dfe931f..b26bb39cdb1d5 100644 --- a/server-common/src/test/java/org/apache/kafka/metadata/AssignmentsHelperTest.java +++ b/server-common/src/test/java/org/apache/kafka/metadata/AssignmentsHelperTest.java @@ -39,13 +39,12 @@ public class AssignmentsHelperTest { @Test public void testBuildRequestData() { - Map assignment = new HashMap() {{ - put(new TopicIdPartition(TOPIC_1, 1), DIR_1); - put(new TopicIdPartition(TOPIC_1, 2), DIR_2); - put(new TopicIdPartition(TOPIC_1, 3), DIR_3); - put(new TopicIdPartition(TOPIC_1, 4), DIR_1); - put(new TopicIdPartition(TOPIC_2, 5), DIR_2); -}}; + Map assignment = new HashMap(); + assignment.put(new TopicIdPartition(TOPIC_1, 1), DIR_1); + assignment.put(new TopicIdPartition(TOPIC_1, 2), DIR_2); + assignment.put(new TopicIdPartition(TOPIC_1, 3), DIR_3); + assignment.put(new TopicIdPartition(TOPIC_1, 4), DIR_1); + assignment.put(new TopicIdPartition(TOPIC_2, 5), DIR_2); AssignReplicasToDirsRequestData built = AssignmentsHelper.buildRequestData(8, 100L, assignment); AssignReplicasToDirsRequestData expected = new AssignReplicasToDirsRequestData() .setBrokerId(8) diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/PauseResumeIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/PauseResumeIntegrationTest.java index e0f080f885581..1c9224e6e137c 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/PauseResumeIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/PauseResumeIntegrationTest.java @@ -91,10 +91,12 @@ public class PauseResumeIntegrationTest { asList(pair("A", 1L), pair("B", 1L), pair("A", 2L), pair("C", 1L), pair("C", 2L)); private static final List COUNT_OUTPUT_DATA2 = asList(pair("A", 3L), pair("B", 2L), pair("A", 4L), pair("C", 3L), pair("C", 4L)); - private static final List COUNT_OUTPUT_DATA_ALL = new ArrayList<>() {{ - addAll(COUNT_OUTPUT_DATA); - addAll(COUNT_OUTPUT_DATA2); - }}; + private static final List COUNT_OUTPUT_DATA_ALL; + static { + COUNT_OUTPUT_DATA_ALL = new ArrayList<>(); + COUNT_OUTPUT_DATA_ALL.addAll(COUNT_OUTPUT_DATA); + COUNT_OUTPUT_DATA_ALL.addAll(COUNT_OUTPUT_DATA2); + } private String appId; private KafkaStreams kafkaStreams, kafkaStreams2; diff --git a/tools/src/test/java/org/apache/kafka/tools/ReplicaVerificationToolTest.java b/tools/src/test/java/org/apache/kafka/tools/ReplicaVerificationToolTest.java index 68e64aa9c7337..e2f72ac83d64b 100644 --- a/tools/src/test/java/org/apache/kafka/tools/ReplicaVerificationToolTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/ReplicaVerificationToolTest.java @@ -35,11 +35,10 @@ public class ReplicaVerificationToolTest { @Test void testReplicaBufferVerifyChecksum() { StringBuilder sb = new StringBuilder(); - final Map expectedReplicasPerTopicAndPartition = new HashMap() {{ - put(new TopicPartition("a", 0), 3); - put(new TopicPartition("a", 1), 3); - put(new TopicPartition("b", 0), 2);
- }}; + final Map expectedReplicasPerTopicAndPartition = new HashMap(); + expectedReplicasPerTopicAndPartition.put(new TopicPartition("a", 0), 3); + expectedReplicasPerTopicAndPartition.put(new TopicPartition("a", 1), 3); + expectedReplicasPerTopicAndPartition.put(new TopicPartition("b", 0), 2); ReplicaVerificationTool.ReplicaBuffer replicaBuffer = new ReplicaVerificationTool.ReplicaBuffer(expectedReplicasPerTopicAndPartition, Map.of(), 2, 0);
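
Most of the Java changes above are the mechanical output of the two active recipes: `RemoveUnusedPrivateMethods` (the removal of the unused `matchingTopic` helper in `ShareAcknowledgeResponse`) and `NoDoubleBraceInitialization` (everything else). As a minimal, self-contained sketch of the latter transformation (the class and field names below are illustrative, not taken from the Kafka codebase):

```java
import java.util.HashMap;
import java.util.Map;

class DoubleBraceExample {
    // Before: "double-brace" initialization. The outer braces declare an
    // anonymous HashMap subclass; the inner braces are an instance
    // initializer that calls put(). The anonymous class captures a
    // reference to the enclosing instance, which can keep that instance
    // alive longer than intended and breaks serialization when the
    // enclosing class is not serializable.
    private final Map<String, Integer> before = new HashMap<>() {
        {
            put("a", 1);
            put("b", 2);
        }
    };

    // After: the recipe's output, as seen throughout this patch: a plain
    // HashMap assigned and populated in an ordinary initializer block
    // (a static block for static fields, plain statements for locals).
    private final Map<String, Integer> after;
    {
        after = new HashMap<>();
        after.put("a", 1);
        after.put("b", 2);
    }
}
```

To try this locally, `./gradlew rewriteDryRun` reports what would change without touching any sources (the OpenRewrite Gradle plugin writes the proposed changes as a patch file under each project's `build/reports/rewrite/` directory), while `./gradlew rewriteRun` applies them in place. Because `gradle/rewrite.gradle` sets `setFailOnDryRunResults(true)`, the new "Sanity Check" CI step fails whenever the dry run finds anything to change.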