diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh index b3291e461f2ba..64cf6d95d5135 100755 --- a/bin/kafka-run-class.sh +++ b/bin/kafka-run-class.sh @@ -116,14 +116,6 @@ else CLASSPATH="$file":"$CLASSPATH" fi done - if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH" - fi - if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH" - fi fi for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar; diff --git a/build.gradle b/build.gradle index a269a58d4057c..ef599d7a4e7d0 100644 --- a/build.gradle +++ b/build.gradle @@ -50,7 +50,7 @@ ext { minClientJavaVersion = 11 minNonClientJavaVersion = 17 // The connect:api module also belongs to the clients module, but it has already been bumped to JDK 17 as part of KIP-1032. - modulesNeedingJava11 = [":clients", ":streams", ":streams:test-utils", ":streams-scala", ":test-common:test-common-runtime"] + modulesNeedingJava11 = [":clients", ":generator", ":streams", ":streams:test-utils", ":streams-scala", ":test-common:test-common-runtime"] buildVersionFileName = "kafka-version.properties" @@ -129,8 +129,7 @@ ext { if (name in ["compileTestJava", "compileTestScala"]) { options.compilerArgs << "-parameters" } else if (name in ["compileJava", "compileScala"]) { - if (!project.path.startsWith(":connect") && !project.path.startsWith(":storage")) - options.compilerArgs << "-Xlint:-rawtypes" + options.compilerArgs << "-Xlint:-rawtypes" options.compilerArgs << "-Xlint:all" options.compilerArgs << "-Xlint:-serial" options.compilerArgs << "-Xlint:-try" @@ -2759,9 +2758,6 @@ project(':streams') { ':streams:integration-tests', ':streams:test-utils:test', ':streams:streams-scala:test', - ':streams:upgrade-system-tests-0100:test', - ':streams:upgrade-system-tests-0101:test', - ':streams:upgrade-system-tests-0102:test', ':streams:upgrade-system-tests-0110:test', ':streams:upgrade-system-tests-10:test', ':streams:upgrade-system-tests-11:test', @@ -2970,57 +2966,6 @@ project(':streams:examples') { } } -project(':streams:upgrade-system-tests-0100') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0100" - } - - dependencies { - testImplementation(libs.kafkaStreams_0100) { - exclude group: 'org.slf4j', module: 'slf4j-log4j12' - exclude group: 'log4j', module: 'log4j' - } - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-0101') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0101" - } - - dependencies { - testImplementation(libs.kafkaStreams_0101) { - exclude group: 'org.slf4j', module: 'slf4j-log4j12' - exclude group: 'log4j', module: 'log4j' - } - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-0102') { - base { - archivesName = "kafka-streams-upgrade-system-tests-0102" - } - - dependencies { - testImplementation libs.kafkaStreams_0102 - testRuntimeOnly libs.junitJupiter - } - - systemTestLibs { - dependsOn testJar - } -} - project(':streams:upgrade-system-tests-0110') { base{ archivesName = "kafka-streams-upgrade-system-tests-0110" diff --git a/checkstyle/suppressions.xml 
b/checkstyle/suppressions.xml index 5624b5c70ef2c..2f8ff2d84c24f 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -47,7 +47,7 @@ - + diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index c75d0e263fc38..5bbabe61a6ce1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -83,9 +83,7 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; @@ -353,7 +351,8 @@ public void onGroupAssignmentUpdated(Set partitions) { metrics, fetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - backgroundEventHandler); + backgroundEventHandler, + false); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); final Supplier requestManagersSupplier = RequestManagers.supplier(time, @@ -524,7 +523,8 @@ public void onGroupAssignmentUpdated(Set partitions) { logContext, client, metadata, - backgroundEventHandler + backgroundEventHandler, + false ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); Supplier requestManagersSupplier = RequestManagers.supplier( @@ -1574,12 +1574,9 @@ public void unsubscribe() { subscriptions.assignedPartitions()); try { - // If users subscribe to a topic with invalid name or without permission, they will get some exceptions. - // Because network thread keeps trying to send MetadataRequest or ConsumerGroupHeartbeatRequest in the background, - // there will be some error events in the background queue. + // If users have fatal error, they will get some exceptions in the background queue. // When running unsubscribe, these exceptions should be ignored, or users can't unsubscribe successfully. 
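With notifyMetadataErrorsViaErrorQueue set to false for the asynchronous consumer, metadata failures such as InvalidTopicException or TopicAuthorizationException no longer arrive through the background error queue; the network thread instead stashes the error and fails the pending metadata-dependent events directly, which is why unsubscribe() only needs to tolerate GroupAuthorizationException here. A minimal, self-contained sketch of that stash-and-fail pattern follows; the class and method names are illustrative stand-ins, not the consumer's actual internals.

// Illustrative sketch only (hypothetical names, not the real consumer internals):
// the network thread stashes a metadata error, and pending metadata-dependent
// futures are failed with it on the next pass.
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

final class MetadataErrorPropagator {
    private Optional<Exception> metadataError = Optional.empty();

    // Called from the polling path instead of enqueuing an ErrorEvent.
    void onMetadataError(Exception e) {
        metadataError = Optional.of(e);
    }

    // Mirrors the get-and-clear semantics added to NetworkClientDelegate.
    Optional<Exception> getAndClear() {
        Optional<Exception> error = metadataError;
        metadataError = Optional.empty();
        return error;
    }

    // Fails every pending metadata-dependent future with the stashed error, if any.
    void maybeFailPending(List<CompletableFuture<?>> pendingMetadataEvents) {
        getAndClear().ifPresent(error ->
            pendingMetadataEvents.forEach(f -> f.completeExceptionally(error)));
    }
}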
- processBackgroundEvents(unsubscribeEvent.future(), timer, - e -> e instanceof InvalidTopicException || e instanceof TopicAuthorizationException || e instanceof GroupAuthorizationException); + processBackgroundEvents(unsubscribeEvent.future(), timer, e -> e instanceof GroupAuthorizationException); log.info("Unsubscribed all topics or patterns and assigned partitions"); } catch (TimeoutException e) { log.error("Failed while waiting for the unsubscribe event to complete"); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index 4f7d256104bb8..7ca01ce257831 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventProcessor; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; +import org.apache.kafka.clients.consumer.internals.events.CompletableApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.common.internals.IdempotentCloser; @@ -40,6 +41,7 @@ import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.function.Supplier; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.common.utils.Utils.closeQuietly; @@ -154,6 +156,8 @@ void runOnce() { .reduce(Long.MAX_VALUE, Math::min); reapExpiredApplicationEvents(currentTimeMs); + List> uncompletedEvents = applicationEventReaper.uncompletedEvents(); + maybeFailOnMetadataError(uncompletedEvents); } /** @@ -165,9 +169,13 @@ private void processApplicationEvents() { for (ApplicationEvent event : events) { try { - if (event instanceof CompletableEvent) + if (event instanceof CompletableEvent) { applicationEventReaper.add((CompletableEvent) event); - + // Check if there are any metadata errors and fail the CompletableEvent if an error is present. + // This call is meant to handle "immediately completed events" which may not enter the awaiting state, + // so metadata errors need to be checked and handled right away. + maybeFailOnMetadataError(List.of((CompletableEvent) event)); + } applicationEventProcessor.process(event); } catch (Throwable t) { log.warn("Error processing event {}", t.getMessage(), t); @@ -325,4 +333,21 @@ void cleanup() { log.debug("Closed the consumer network thread"); } } + + /** + * If there is a metadata error, complete all uncompleted events that require subscription metadata. 
+ */ + private void maybeFailOnMetadataError(List> events) { + List> subscriptionMetadataEvent = events.stream() + .filter(e -> e instanceof CompletableApplicationEvent) + .map(e -> (CompletableApplicationEvent) e) + .filter(CompletableApplicationEvent::requireSubscriptionMetadata) + .collect(Collectors.toList()); + + if (subscriptionMetadataEvent.isEmpty()) + return; + networkClientDelegate.getAndClearMetadataError().ifPresent(metadataError -> + subscriptionMetadataEvent.forEach(event -> event.future().completeExceptionally(metadataError)) + ); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java index fd05f8983555d..6c1ab43a4f253 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java @@ -69,6 +69,8 @@ public class NetworkClientDelegate implements AutoCloseable { private final int requestTimeoutMs; private final Queue unsentRequests; private final long retryBackoffMs; + private Optional metadataError; + private final boolean notifyMetadataErrorsViaErrorQueue; public NetworkClientDelegate( final Time time, @@ -76,7 +78,8 @@ public NetworkClientDelegate( final LogContext logContext, final KafkaClient client, final Metadata metadata, - final BackgroundEventHandler backgroundEventHandler) { + final BackgroundEventHandler backgroundEventHandler, + final boolean notifyMetadataErrorsViaErrorQueue) { this.time = time; this.client = client; this.metadata = metadata; @@ -85,6 +88,8 @@ public NetworkClientDelegate( this.unsentRequests = new ArrayDeque<>(); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); + this.metadataError = Optional.empty(); + this.notifyMetadataErrorsViaErrorQueue = notifyMetadataErrorsViaErrorQueue; } // Visible for testing @@ -150,7 +155,11 @@ private void maybePropagateMetadataError() { try { metadata.maybeThrowAnyException(); } catch (Exception e) { - backgroundEventHandler.add(new ErrorEvent(e)); + if (notifyMetadataErrorsViaErrorQueue) { + backgroundEventHandler.add(new ErrorEvent(e)); + } else { + metadataError = Optional.of(e); + } } } @@ -230,6 +239,12 @@ private ClientRequest makeClientRequest( unsent.handler ); } + + public Optional getAndClearMetadataError() { + Optional metadataError = this.metadataError; + this.metadataError = Optional.empty(); + return metadataError; + } public Node leastLoadedNode() { return this.client.leastLoadedNode(time.milliseconds()).node(); @@ -412,7 +427,8 @@ public static Supplier supplier(final Time time, final Metrics metrics, final Sensor throttleTimeSensor, final ClientTelemetrySender clientTelemetrySender, - final BackgroundEventHandler backgroundEventHandler) { + final BackgroundEventHandler backgroundEventHandler, + final boolean notifyMetadataErrorsViaErrorQueue) { return new CachedSupplier<>() { @Override protected NetworkClientDelegate create() { @@ -426,7 +442,7 @@ protected NetworkClientDelegate create() { metadata, throttleTimeSensor, clientTelemetrySender); - return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler); + return new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue); } }; } diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java index a5eb91128d3dd..eda01ee3599f9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java @@ -990,16 +990,18 @@ UnsentRequest buildRequest() { } ShareAcknowledgeRequest.Builder requestBuilder = sessionHandler.newShareAcknowledgeBuilder(groupId, fetchConfig); - Node nodeToSend = metadata.fetch().nodeById(nodeId); - log.trace("Building acknowledgements to send : {}", finalAcknowledgementsToSend); - nodesWithPendingRequests.add(nodeId); isProcessed = false; if (requestBuilder == null) { handleSessionErrorCode(Errors.SHARE_SESSION_NOT_FOUND); return null; } else { + Node nodeToSend = metadata.fetch().nodeById(nodeId); + nodesWithPendingRequests.add(nodeId); + + log.trace("Building acknowledgements to send : {}", finalAcknowledgementsToSend); + inFlightAcknowledgements.putAll(finalAcknowledgementsToSend); if (incompleteAcknowledgements.isEmpty()) { acknowledgementsToSend.clear(); @@ -1082,12 +1084,16 @@ void handleAcknowledgeTimedOut(TopicIdPartition tip) { * being sent. */ void handleSessionErrorCode(Errors errorCode) { - inFlightAcknowledgements.forEach((tip, acks) -> { + Map acknowledgementsMapToClear = + incompleteAcknowledgements.isEmpty() ? acknowledgementsToSend : incompleteAcknowledgements; + + acknowledgementsMapToClear.forEach((tip, acks) -> { if (acks != null) { acks.setAcknowledgeErrorCode(errorCode); } resultHandler.complete(tip, acks, onCommitAsync()); }); + acknowledgementsMapToClear.clear(); processingComplete(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index c4f77bf1c6f6c..0ca371253bcbd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -279,7 +279,8 @@ private enum AcknowledgementMode { metrics, shareFetchMetricsManager.throttleTimeSensor(), clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), - backgroundEventHandler + backgroundEventHandler, + true ); this.completedAcknowledgements = new LinkedList<>(); @@ -378,7 +379,7 @@ private enum AcknowledgementMode { final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); final Supplier networkClientDelegateSupplier = - () -> new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler); + () -> new NetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true); GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java index d7469afdc9e70..100c9ce61b6d4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java @@ -178,6 +178,8 @@ public ShareFetchRequest.Builder newShareFetchBuilder(String 
groupId, FetchConfi public ShareAcknowledgeRequest.Builder newShareAcknowledgeBuilder(String groupId, FetchConfig fetchConfig) { if (nextMetadata.isNewSession()) { // A share session cannot be started with a ShareAcknowledge request + nextPartitions.clear(); + nextAcknowledgements.clear(); return null; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java index 2c7fdd7464283..5f1ced33e3a09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CheckAndUpdatePositionsEvent.java @@ -18,6 +18,9 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.SubscriptionState; +import org.apache.kafka.common.TopicPartition; + +import java.time.Duration; /** * Event to check if all assigned partitions have fetch positions. If there are positions missing, it will fetch @@ -32,4 +35,15 @@ public class CheckAndUpdatePositionsEvent extends CompletableApplicationEvent consumer.unsubscribe()); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testCloseWithInvalidTopicException() { - consumer = newConsumer(); - backgroundEventQueue.add(new ErrorEvent(new InvalidTopicException("Invalid topic name"))); - completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testUnsubscribeWithTopicAuthorizationException() { - consumer = newConsumer(); - backgroundEventQueue.add(new ErrorEvent(new TopicAuthorizationException(Set.of("test-topic")))); - completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.unsubscribe()); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testCloseWithTopicAuthorizationException() { - consumer = newConsumer(); - backgroundEventQueue.add(new ErrorEvent(new TopicAuthorizationException(Set.of("test-topic")))); - completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.close()); - } - @Test public void testCommitAsyncWithNullCallback() { consumer = newConsumer(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index f1db099203a02..e97efd2b75feb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -3700,7 +3700,7 @@ private void buildDependencies(MetricConfig metricConfig, properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); ConsumerConfig config = new ConsumerConfig(properties); - networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler)); + networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true)); } private List collectRecordOffsets(List> records) { @@ -3777,8 +3777,9 @@ public TestableNetworkClientDelegate(Time time, LogContext logContext, KafkaClient 
client, Metadata metadata, - BackgroundEventHandler backgroundEventHandler) { - super(time, config, logContext, client, metadata, backgroundEventHandler); + BackgroundEventHandler backgroundEventHandler, + boolean notifyMetadataErrorsViaErrorQueue) { + super(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue); } @Override diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java index 10e454499431f..4177419fa534c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java @@ -52,6 +52,7 @@ import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -78,7 +79,7 @@ public void setup() { @Test void testPollResultTimer() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() @@ -102,7 +103,7 @@ void testPollResultTimer() throws Exception { @Test public void testSuccessfulResponse() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); prepareFindCoordinatorResponse(Errors.NONE); @@ -116,7 +117,7 @@ public void testSuccessfulResponse() throws Exception { @Test public void testTimeoutBeforeSend() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { client.setUnreachable(mockNode(), REQUEST_TIMEOUT_MS); NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); ncd.add(unsentRequest); @@ -130,7 +131,7 @@ public void testTimeoutBeforeSend() throws Exception { @Test public void testTimeoutAfterSend() throws Exception { - try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { + try (NetworkClientDelegate ncd = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); ncd.add(unsentRequest); ncd.poll(0, time.milliseconds()); @@ -164,7 +165,7 @@ public void testEnsureCorrectCompletionTimeOnComplete() { @Test public void testEnsureTimerSetOnAdd() { - NetworkClientDelegate ncd = newNetworkClientDelegate(); + NetworkClientDelegate ncd = newNetworkClientDelegate(false); NetworkClientDelegate.UnsentRequest findCoordRequest = newUnsentFindCoordinatorRequest(); assertNull(findCoordRequest.timer()); @@ -181,7 +182,7 @@ public void testEnsureTimerSetOnAdd() { @Test public void testHasAnyPendingRequests() throws Exception { - try (NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate()) { + try (NetworkClientDelegate 
networkClientDelegate = newNetworkClientDelegate(false)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); networkClientDelegate.add(unsentRequest); @@ -212,9 +213,24 @@ public void testPropagateMetadataError() { AuthenticationException authException = new AuthenticationException("Test Auth Exception"); doThrow(authException).when(metadata).maybeThrowAnyException(); + NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false); + assertTrue(networkClientDelegate.getAndClearMetadataError().isEmpty()); + networkClientDelegate.poll(0, time.milliseconds()); + + Optional metadataError = networkClientDelegate.getAndClearMetadataError(); + assertTrue(metadataError.isPresent()); + assertInstanceOf(AuthenticationException.class, metadataError.get()); + assertEquals(authException.getMessage(), metadataError.get().getMessage()); + } + + @Test + public void testPropagateMetadataErrorWithErrorEvent() { + AuthenticationException authException = new AuthenticationException("Test Auth Exception"); + doThrow(authException).when(metadata).maybeThrowAnyException(); + LinkedList backgroundEventQueue = new LinkedList<>(); this.backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue); - NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(); + NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(true); assertEquals(0, backgroundEventQueue.size()); networkClientDelegate.poll(0, time.milliseconds()); @@ -226,7 +242,7 @@ public void testPropagateMetadataError() { assertEquals(authException, ((ErrorEvent) event).error()); } - public NetworkClientDelegate newNetworkClientDelegate() { + public NetworkClientDelegate newNetworkClientDelegate(boolean notifyMetadataErrorsViaErrorQueue) { LogContext logContext = new LogContext(); Properties properties = new Properties(); properties.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -238,7 +254,8 @@ public NetworkClientDelegate newNetworkClientDelegate() { logContext, this.client, this.metadata, - this.backgroundEventHandler); + this.backgroundEventHandler, + notifyMetadataErrorsViaErrorQueue); } public NetworkClientDelegate.UnsentRequest newUnsentFindCoordinatorRequest() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index 59473ae9f8bdc..7231e0b639029 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -369,7 +369,7 @@ public void testCommitAsync() { } @Test - public void testServerDisconnectedOnShareAcknowledge() { + public void testServerDisconnectedOnShareAcknowledge() throws InterruptedException { buildRequestManager(); // Enabling the config so that background event is sent when the acknowledgement response is received. 
shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); @@ -384,22 +384,46 @@ public void testServerDisconnectedOnShareAcknowledge() { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + fetchRecords(); + Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(1L, AcknowledgeType.ACCEPT); acknowledgements.add(2L, AcknowledgeType.ACCEPT); - acknowledgements.add(3L, AcknowledgeType.REJECT); shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(3L, AcknowledgeType.REJECT); + + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements2)); + client.prepareResponse(null, true); networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); assertEquals(Errors.UNKNOWN_SERVER_ERROR, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); completedAcknowledgements.clear(); + + assertEquals(1, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); + + TestUtils.retryOnExceptionWithTimeout(() -> { + assertEquals(0, shareConsumeRequestManager.sendAcknowledgements()); + // We expect the remaining acknowledgements to be cleared due to share session epoch being set to 0. + assertNull(shareConsumeRequestManager.requestStates(0)); + // The callback for these unsent acknowledgements will be invoked with an error code. + assertEquals(Collections.singletonMap(tip0, acknowledgements2), completedAcknowledgements.get(0)); + assertEquals(Errors.SHARE_SESSION_NOT_FOUND, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); + }); + + // Attempt a normal fetch to check if nodesWithPendingRequests is empty. 
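The retry above relies on the revised handleSessionErrorCode, which now fails whichever acknowledgements are still pending (incompleteAcknowledgements when a resend was in progress, otherwise acknowledgementsToSend) and clears them so they cannot leak into a later request. A rough sketch of that complete-then-clear step follows; the generic types are simplified stand-ins rather than the real acknowledgement classes.

// Rough sketch (simplified stand-in types): fail whichever acknowledgements are
// still pending with the session error, then clear them so nothing is left to
// piggyback on the next ShareFetch/ShareAcknowledge.
import java.util.Map;
import java.util.function.BiConsumer;

final class PendingAckCompleter<K, A> {
    void failPending(Map<K, A> incompleteAcks,
                     Map<K, A> acksToSend,
                     BiConsumer<K, A> completeWithSessionError) {
        // Prefer acknowledgements that were already being retried; otherwise fall
        // back to the ones that were never sent.
        Map<K, A> pending = incompleteAcks.isEmpty() ? acksToSend : incompleteAcks;
        pending.forEach(completeWithSessionError);
        pending.clear();
    }
}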
+ assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); } @Test @@ -1663,7 +1687,8 @@ private void buildDependencies(MetricConfig metricConfig, properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); ConsumerConfig config = new ConsumerConfig(properties); - networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, new BackgroundEventHandler(new LinkedBlockingQueue<>()))); + networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, + new BackgroundEventHandler(new LinkedBlockingQueue<>()), false)); } private class TestableShareConsumeRequestManager extends ShareConsumeRequestManager { @@ -1715,8 +1740,9 @@ public TestableNetworkClientDelegate(Time time, LogContext logContext, KafkaClient client, Metadata metadata, - BackgroundEventHandler backgroundEventHandler) { - super(time, config, logContext, client, metadata, backgroundEventHandler); + BackgroundEventHandler backgroundEventHandler, + boolean notifyMetadataErrorsViaErrorQueue) { + super(time, config, logContext, client, metadata, backgroundEventHandler, notifyMetadataErrorsViaErrorQueue); } @Override diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java index df8a61a750384..0ce2f349f98ee 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicIdPartition; @@ -41,6 +42,7 @@ import static org.apache.kafka.common.requests.ShareRequestMetadata.INITIAL_EPOCH; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -415,6 +417,39 @@ public void testAddNewIdAfterTopicRemovedFromSession() { assertEquals(2, requestData3.shareSessionEpoch(), "Did not have the correct session epoch"); } + @Test + public void testNextAcknowledgementsClearedOnInvalidRequest() { + String groupId = "G1"; + Uuid memberId = Uuid.randomUuid(); + ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + + Map topicIds = new HashMap<>(); + Map topicNames = new HashMap<>(); + Uuid fooId = addTopicId(topicIds, topicNames, "foo"); + TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + + handler.addPartitionToFetch(foo0, acknowledgements); + + // As we start with a ShareAcknowledge on epoch 0, we expect a null response. 
+ assertNull(handler.newShareAcknowledgeBuilder(groupId, fetchConfig)); + + // Attempt a new ShareFetch + TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); + handler.addPartitionToFetch(foo1, null); + ShareFetchRequestData requestData = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); + + // We should have cleared the unsent acknowledgements before this ShareFetch. + assertEquals(0, requestData.topics().get(0).partitions().get(0).acknowledgementBatches().size()); + + ArrayList expectedToSend1 = new ArrayList<>(); + expectedToSend1.add(new TopicIdPartition(fooId, 1, "foo")); + assertListEquals(expectedToSend1, reqFetchList(requestData, topicNames)); + assertEquals(memberId.toString(), requestData.memberId()); + } + private Uuid addTopicId(Map topicIds, Map topicNames, String name) { // If the same topic name is added more than once, the latest mapping will be in the // topicIds, but all mappings will be in topicNames. This is needed in the replace tests. diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java index bef30a113df7d..1c21038e66a1a 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java @@ -1360,6 +1360,10 @@ public void run() { */ @Override public void complete(Throwable exception) { + if (future.isDone()) { + return; + } + final long purgatoryTimeMs = time.milliseconds() - deferredEventQueuedTimestamp; CompletableFuture appendFuture = result != null ? result.appendFuture() : null; @@ -1653,6 +1657,10 @@ public void run() { */ @Override public void complete(Throwable exception) { + if (future.isDone()) { + return; + } + final long purgatoryTimeMs = time.milliseconds() - deferredEventQueuedTimestamp; if (exception == null) { future.complete(null); diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java index 591b37e2fb450..391813250c147 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java @@ -31,6 +31,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; + public class CoordinatorRuntimeMetricsImpl implements CoordinatorRuntimeMetrics { /** @@ -291,7 +293,7 @@ public void recordEventProcessingTime(long durationMs) { @Override public void recordEventPurgatoryTime(long purgatoryTimeMs) { - eventPurgatoryTimeSensor.record(purgatoryTimeMs); + eventPurgatoryTimeSensor.record(Math.min(MAX_LATENCY_MS, purgatoryTimeMs)); } @Override diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java index bb637d94b27a0..7285b58ffabf4 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java +++ 
b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java @@ -36,6 +36,7 @@ import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.EVENT_PURGATORY_TIME_METRIC_NAME; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.EVENT_QUEUE_TIME_METRIC_NAME; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.NUM_PARTITIONS_METRIC_NAME; +import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -204,6 +205,23 @@ public void testHistogramMetrics(String metricNamePrefix) { assertEquals(999.0, metric.metricValue()); } + @Test + public void testRecordEventPurgatoryTimeLimit() { + Time time = new MockTime(); + Metrics metrics = new Metrics(time); + + CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); + + IntStream.range(1, 1001).forEach(__ -> runtimeMetrics.recordEventPurgatoryTime(MAX_LATENCY_MS + 1000L)); + + MetricName metricName = kafkaMetricName(metrics, EVENT_PURGATORY_TIME_METRIC_NAME + "-max"); + KafkaMetric metric = metrics.metrics().get(metricName); + long value = ((Double) metric.metricValue()).longValue(); + + // 3 sigfigs in HdrHistogram is not precise enough. + assertTrue(value >= MAX_LATENCY_MS && value < MAX_LATENCY_MS + 1000L); + } + private static void assertMetricGauge(Metrics metrics, org.apache.kafka.common.MetricName metricName, long count) { assertEquals(count, (long) metrics.metric(metricName).metricValue()); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java index 34364887ae1b7..40d059fa3d6ec 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java @@ -4410,6 +4410,144 @@ public void testRecordEventPurgatoryTime() throws Exception { verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); } + @Test + public void testWriteEventCompletesOnlyOnce() throws Exception { + // Completes once via timeout, then again with HWM update. + Duration writeTimeout = Duration.ofMillis(1000L); + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ManualEventProcessor processor = new ManualEventProcessor(); + CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(writeTimeout) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(processor) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(runtimeMetrics) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Loads the coordinator. Poll once to execute the load operation and once + // to complete the load. 
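This test exercises the new guard in the deferred write event's complete(): once the future is done (here via the write timeout), a later high-watermark advance must neither complete it again nor record purgatory time a second time. A minimal standalone sketch of such an at-most-once completion is shown below; DeferredResult is a hypothetical class, not the runtime's actual event type.

// Minimal sketch of an at-most-once completion, mirroring the new
// `if (future.isDone()) return;` guard; DeferredResult is hypothetical.
import java.util.concurrent.CompletableFuture;

final class DeferredResult<T> {
    final CompletableFuture<T> future = new CompletableFuture<>();

    void complete(T value, Throwable error) {
        if (future.isDone())
            return;                 // already timed out or already committed: no-op
        if (error != null)
            future.completeExceptionally(error);
        else
            future.complete(value);
        // purgatory-time metrics would be recorded here, exactly once per event
    }
}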
+ runtime.scheduleLoadOperation(TP, 10); + processor.poll(); + processor.poll(); + + // write#1 will be committed and update the high watermark. Record time spent in purgatory. + CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout, + state -> new CoordinatorResult<>(List.of("record1"), "response1") + ); + + processor.poll(); + + // Records have been written to the log. + long writeTimestamp = timer.time().milliseconds(); + assertEquals(Collections.singletonList( + records(writeTimestamp, "record1") + ), writer.entries(TP)); + + // There is no pending high watermark. + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Advance the clock to time out the write event. Confirm write#1 is completed with a timeout. + timer.advanceClock(writeTimeout.toMillis() + 1L); + processor.poll(); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); + assertTrue(write1.isCompletedExceptionally()); + + // HWM update + writer.commit(TP, 1); + assertEquals(1, processor.size()); + assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Poll once to process the high watermark update and complete write#1. It has already + // been completed and this is a noop. + processor.poll(); + + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); + assertTrue(write1.isCompletedExceptionally()); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L); + } + + @Test + public void testCompleteTransactionEventCompletesOnlyOnce() throws Exception { + // Completes once via timeout, then again with HWM update. + Duration writeTimeout = Duration.ofMillis(1000L); + MockTimer timer = new MockTimer(); + MockPartitionWriter writer = new MockPartitionWriter(); + ManualEventProcessor processor = new ManualEventProcessor(); + CoordinatorRuntimeMetrics runtimeMetrics = mock(CoordinatorRuntimeMetrics.class); + + CoordinatorRuntime runtime = + new CoordinatorRuntime.Builder() + .withTime(timer.time()) + .withTimer(timer) + .withDefaultWriteTimeOut(writeTimeout) + .withLoader(new MockCoordinatorLoader()) + .withEventProcessor(processor) + .withPartitionWriter(writer) + .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) + .withCoordinatorRuntimeMetrics(runtimeMetrics) + .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) + .withSerializer(new StringSerializer()) + .withExecutorService(mock(ExecutorService.class)) + .build(); + + // Loads the coordinator. Poll once to execute the load operation and once + // to complete the load. + runtime.scheduleLoadOperation(TP, 10); + processor.poll(); + processor.poll(); + + // transaction completion. + CompletableFuture write1 = runtime.scheduleTransactionCompletion( + "transactional-write", + TP, + 100L, + (short) 50, + 1, + TransactionResult.COMMIT, + writeTimeout + ); + processor.poll(); + + // Records have been written to the log. + assertEquals(List.of( + endTransactionMarker(100, (short) 50, timer.time().milliseconds(), 1, ControlRecordType.COMMIT) + ), writer.entries(TP)); + + // The write timeout tasks exist. + assertEquals(1, timer.size()); + assertFalse(write1.isDone()); + + // Advance the clock to time out the write event. Confirm write#1 is completed with a timeout. 
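Elsewhere in this change, recordEventPurgatoryTime now clamps its sample to KafkaMetricHistogram.MAX_LATENCY_MS before handing it to the sensor, since the backing HdrHistogram's range is capped at that bound. The clamp is a single Math.min, sketched here against a hypothetical recorder rather than the real sensor.

// Sketch of clamping a latency sample to the histogram's upper bound before it
// is recorded; `record` stands in for the real sensor/histogram call.
import java.util.function.LongConsumer;

final class BoundedLatencyRecorder {
    private final long maxLatencyMs;

    BoundedLatencyRecorder(long maxLatencyMs) {
        this.maxLatencyMs = maxLatencyMs;
    }

    void recordPurgatoryTime(long purgatoryTimeMs, LongConsumer record) {
        // Samples beyond the bound land on the bound itself (within the
        // histogram's precision) instead of being rejected.
        record.accept(Math.min(maxLatencyMs, purgatoryTimeMs));
    }
}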
+ timer.advanceClock(writeTimeout.toMillis() + 1L); + processor.poll(); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1); + assertTrue(write1.isCompletedExceptionally()); + + // HWM update + writer.commit(TP, 1); + assertEquals(1, processor.size()); + assertEquals(1, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + + // Poll once to process the high watermark update and complete write#1. It has already + // been completed and this is a noop. + processor.poll(); + + assertEquals(NO_OFFSET, runtime.contextOrThrow(TP).highWatermarklistener.lastHighWatermark()); + assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); + assertTrue(write1.isCompletedExceptionally()); + verify(runtimeMetrics, times(1)).recordEventPurgatoryTime(writeTimeout.toMillis() + 1L); + } @Test public void testCoordinatorExecutor() { Duration writeTimeout = Duration.ofMillis(1000); diff --git a/core/src/main/java/kafka/server/TierStateMachine.java b/core/src/main/java/kafka/server/TierStateMachine.java index f5f83cb240bae..ddb19e86aeca1 100644 --- a/core/src/main/java/kafka/server/TierStateMachine.java +++ b/core/src/main/java/kafka/server/TierStateMachine.java @@ -229,12 +229,8 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, } RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlm.fetchRemoteLogSegmentMetadata(topicPartition, targetEpoch, previousOffsetToLeaderLocalLogStartOffset) - .orElseThrow(() -> new RemoteStorageException("Couldn't build the state from remote store for partition: " + topicPartition + - ", currentLeaderEpoch: " + currentLeaderEpoch + - ", leaderLocalLogStartOffset: " + leaderLocalLogStartOffset + - ", leaderLogStartOffset: " + leaderLogStartOffset + - ", epoch: " + targetEpoch + - "as the previous remote log segment metadata was not found")); + .orElseThrow(() -> buildRemoteStorageException(topicPartition, targetEpoch, currentLeaderEpoch, + leaderLocalLogStartOffset, leaderLogStartOffset)); // Build leader epoch cache, producer snapshots until remoteLogSegmentMetadata.endOffset() and start @@ -265,4 +261,17 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, return nextOffset; } + + private RemoteStorageException buildRemoteStorageException(TopicPartition topicPartition, + int targetEpoch, + int currentLeaderEpoch, + long leaderLocalLogStartOffset, + long leaderLogStartOffset) { + String message = String.format( + "Couldn't build the state from remote store for partition: %s, currentLeaderEpoch: %d, " + + "leaderLocalLogStartOffset: %d, leaderLogStartOffset: %d, epoch: %d as the previous remote log segment metadata was not found", + topicPartition, currentLeaderEpoch, leaderLocalLogStartOffset, leaderLogStartOffset, targetEpoch + ); + return new RemoteStorageException(message); + } } diff --git a/core/src/main/java/kafka/server/share/SharePartition.java b/core/src/main/java/kafka/server/share/SharePartition.java index 00ec974a72239..3dee1e35f3e28 100644 --- a/core/src/main/java/kafka/server/share/SharePartition.java +++ b/core/src/main/java/kafka/server/share/SharePartition.java @@ -71,7 +71,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -371,52 +370,25 @@ public static RecordState forId(byte id) { */ public 
CompletableFuture maybeInitialize() { log.debug("Maybe initialize share partition: {}-{}", groupId, topicIdPartition); - CompletableFuture future = new CompletableFuture<>(); - AtomicReference> futureException = new AtomicReference<>(Optional.empty()); // Check if the share partition is already initialized. - InitializationResult initializationResult = checkInitializationCompletion(); - if (initializationResult.isComplete()) { - if (initializationResult.throwable() != null) { - future.completeExceptionally(initializationResult.throwable()); - } else { - future.complete(null); - } - return future; + try { + if (initializedOrThrowException()) return CompletableFuture.completedFuture(null); + } catch (Exception e) { + return CompletableFuture.failedFuture(e); } + // If code reaches here then the share partition is not initialized. Initialize the share partition. // All the pending requests should wait to get completed before the share partition is initialized. - // Attain lock to avoid any concurrent requests to be processed. - lock.writeLock().lock(); - boolean shouldFutureBeCompleted = false; + // Attain lock while updating the state to avoid any concurrent requests to be processed. try { - // Re-check the state to verify if previous requests has already initialized the share partition. - initializationResult = checkInitializationCompletion(); - if (initializationResult.isComplete()) { - if (initializationResult.throwable() != null) { - futureException.set(Optional.of(initializationResult.throwable())); - } - shouldFutureBeCompleted = true; - return future; - } - - // Update state to initializing to avoid any concurrent requests to be processed. - partitionState = SharePartitionState.INITIALIZING; + if (!emptyToInitialState()) return CompletableFuture.completedFuture(null); } catch (Exception e) { - log.error("Failed to initialize the share partition: {}-{}", groupId, topicIdPartition, e); - completeInitializationWithException(); - futureException.set(Optional.of(e)); - shouldFutureBeCompleted = true; - return future; - } finally { - lock.writeLock().unlock(); - if (shouldFutureBeCompleted) { - if (futureException.get().isPresent()) { - future.completeExceptionally(futureException.get().get()); - } else { - future.complete(null); - } - } + return CompletableFuture.failedFuture(e); } + + // The share partition is not initialized, hence try to initialize it. There shall be only one + // request trying to initialize the share partition. + CompletableFuture future = new CompletableFuture<>(); // Initialize the share partition by reading the state from the persister. persister.readState(new ReadShareGroupStateParameters.Builder() .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() @@ -426,20 +398,19 @@ public CompletableFuture maybeInitialize() { .build()) .build() ).whenComplete((result, exception) -> { + Throwable throwable = null; lock.writeLock().lock(); try { if (exception != null) { log.error("Failed to initialize the share partition: {}-{}", groupId, topicIdPartition, exception); - completeInitializationWithException(); - futureException.set(Optional.of(exception)); + throwable = exception; return; } if (result == null || result.topicsData() == null || result.topicsData().size() != 1) { log.error("Failed to initialize the share partition: {}-{}. 
Invalid state found: {}.", groupId, topicIdPartition, result); - completeInitializationWithException(); - futureException.set(Optional.of(new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -447,8 +418,7 @@ public CompletableFuture maybeInitialize() { if (state.topicId() != topicIdPartition.topicId() || state.partitions().size() != 1) { log.error("Failed to initialize the share partition: {}-{}. Invalid topic partition response: {}.", groupId, topicIdPartition, result); - completeInitializationWithException(); - futureException.set(Optional.of(new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -456,8 +426,7 @@ public CompletableFuture maybeInitialize() { if (partitionData.partition() != topicIdPartition.partition()) { log.error("Failed to initialize the share partition: {}-{}. Invalid partition response: {}.", groupId, topicIdPartition, partitionData); - completeInitializationWithException(); - futureException.set(Optional.of(new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } @@ -465,18 +434,11 @@ public CompletableFuture maybeInitialize() { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); log.error("Failed to initialize the share partition: {}-{}. Exception occurred: {}.", groupId, topicIdPartition, partitionData); - completeInitializationWithException(); - futureException.set(Optional.of(ex)); + throwable = ex; return; } - try { - startOffset = startOffsetDuringInitialization(partitionData.startOffset()); - } catch (Exception e) { - completeInitializationWithException(); - futureException.set(Optional.of(e)); - return; - } + startOffset = startOffsetDuringInitialization(partitionData.startOffset()); stateEpoch = partitionData.stateEpoch(); List stateBatches = partitionData.stateBatches(); @@ -485,8 +447,7 @@ public CompletableFuture maybeInitialize() { log.error("Invalid state batch found for the share partition: {}-{}. The base offset: {}" + " is less than the start offset: {}.", groupId, topicIdPartition, stateBatch.firstOffset(), startOffset); - completeInitializationWithException(); - futureException.set(Optional.of(new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)))); + throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } InFlightBatch inFlightBatch = new InFlightBatch(EMPTY_MEMBER_ID, stateBatch.firstOffset(), @@ -507,10 +468,18 @@ public CompletableFuture maybeInitialize() { } // Set the partition state to Active and complete the future. partitionState = SharePartitionState.ACTIVE; + } catch (Exception e) { + throwable = e; } finally { + boolean isFailed = throwable != null; + if (isFailed) { + partitionState = SharePartitionState.FAILED; + } + // Release the lock. 
lock.writeLock().unlock(); - if (futureException.get().isPresent()) { - future.completeExceptionally(futureException.get().get()); + // Complete the future. + if (isFailed) { + future.completeExceptionally(throwable); } else { future.complete(null); } @@ -1178,32 +1147,31 @@ private boolean stateNotActive() { return partitionState() != SharePartitionState.ACTIVE; } - private void completeInitializationWithException() { + private boolean emptyToInitialState() { lock.writeLock().lock(); try { - partitionState = SharePartitionState.FAILED; + if (initializedOrThrowException()) return false; + partitionState = SharePartitionState.INITIALIZING; + return true; } finally { lock.writeLock().unlock(); } } - private InitializationResult checkInitializationCompletion() { + private boolean initializedOrThrowException() { SharePartitionState currentState = partitionState(); - switch (currentState) { - case ACTIVE: - return new InitializationResult(true); - case FAILED: - return new InitializationResult(true, new IllegalStateException(String.format("Share partition failed to load %s-%s", groupId, topicIdPartition))); - case INITIALIZING: - return new InitializationResult(true, new LeaderNotAvailableException(String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition))); - case FENCED: - return new InitializationResult(true, new FencedStateEpochException(String.format("Share partition is fenced %s-%s", groupId, topicIdPartition))); - case EMPTY: - // Do not complete the future as the share partition is not yet initialized. - return new InitializationResult(false); - default: - throw new IllegalStateException("Unknown share partition state: " + currentState); - } + return switch (currentState) { + case ACTIVE -> true; + case FAILED -> throw new IllegalStateException( + String.format("Share partition failed to load %s-%s", groupId, topicIdPartition)); + case INITIALIZING -> throw new LeaderNotAvailableException( + String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition)); + case FENCED -> throw new FencedStateEpochException( + String.format("Share partition is fenced %s-%s", groupId, topicIdPartition)); + case EMPTY -> + // The share partition is not yet initialized. 
+ false; + }; } private AcquiredRecords acquireNewBatchRecords( @@ -2517,26 +2485,4 @@ void updateOffsetMetadata(long offset, LogOffsetMetadata offsetMetadata) { this.offsetMetadata = offsetMetadata; } } - - static final class InitializationResult { - private final boolean isComplete; - private final Throwable throwable; - - private InitializationResult(boolean isComplete) { - this(isComplete, null); - } - - private InitializationResult(boolean isComplete, Throwable throwable) { - this.isComplete = isComplete; - this.throwable = throwable; - } - - private boolean isComplete() { - return isComplete; - } - - private Throwable throwable() { - return throwable; - } - } } diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 709b7205f2a47..93978745f4a6c 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -300,22 +300,7 @@ public CompletableFuture allFutures = CompletableFuture.allOf( - futures.values().toArray(new CompletableFuture[0])); - return allFutures.thenApply(v -> { - Map result = new HashMap<>(); - futures.forEach((topicIdPartition, future) -> { - ShareAcknowledgeResponseData.PartitionData partitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(topicIdPartition.partition()); - Throwable t = future.join(); - if (t != null) { - partitionData.setErrorCode(Errors.forException(t).code()) - .setErrorMessage(t.getMessage()); - } - result.put(topicIdPartition, partitionData); - }); - return result; - }); + return mapAcknowledgementFutures(futures); } /** @@ -376,6 +361,10 @@ public CompletableFuture> mapAcknowledgementFutures(Map> futuresMap) { CompletableFuture allFutures = CompletableFuture.allOf( futuresMap.values().toArray(new CompletableFuture[0])); return allFutures.thenApply(v -> { diff --git a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala index c1d8b4abc8ed7..451bb5a851132 100644 --- a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala +++ b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala @@ -272,6 +272,25 @@ final class KafkaMetadataLog private ( ) } + /* + Perform a check that the requested snapshot offset is batch aligned via a log read, which + returns the base offset of the batch that contains the requested offset. A snapshot offset + is one greater than the last offset contained in the snapshot, and cannot go past the high + watermark. + + This check is necessary because Raft replication code assumes the snapshot offset is the + start of a batch. If a follower applies a non-batch aligned snapshot at offset (X) and + fetches from this offset, the returned batch will start at offset (X - M), and the + follower will be unable to append it since (X - M) < (X). + */ + val baseOffset = read(snapshotId.offset, Isolation.COMMITTED).startOffsetMetadata.offset + if (snapshotId.offset != baseOffset) { + throw new IllegalArgumentException( + s"Cannot create snapshot at offset (${snapshotId.offset}) because it is not batch aligned. 
" + + s"The batch containing the requested offset has a base offset of ($baseOffset)" + ) + } + createNewSnapshotUnchecked(snapshotId) } diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala index 8110fbe2be782..0a2d4eb493079 100644 --- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala @@ -1273,7 +1273,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testCommitWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) @@ -1310,7 +1310,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testCommitWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() @@ -1328,7 +1328,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testOffsetFetchWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() consumer.assign(List(tp).asJava) @@ -1336,7 +1336,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034")) def testOffsetFetchWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) @@ -1581,7 +1581,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testListOffsetsWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(Set(tp).asJava)) diff --git a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala 
b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala index e1ed205cd29c2..9560f060d0fd0 100644 --- a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala @@ -171,7 +171,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * Tests the ability of producing and consuming with the appropriate ACLs set. */ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeViaAssign(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() @@ -200,7 +200,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeViaSubscribe(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() @@ -210,7 +210,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeWithWildcardAcls(quorum: String, groupProtocol: String): Unit = { setWildcardResourceAcls() val producer = createProducer() @@ -222,7 +222,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeWithPrefixedAcls(quorum: String, groupProtocol: String): Unit = { setPrefixedResourceAcls() val producer = createProducer() @@ -234,7 +234,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testProduceConsumeTopicAutoCreateTopicCreateAcl(quorum: String, groupProtocol: String): Unit = { // topic2 is not created on setup() val tp2 = new TopicPartition("topic2", 0) @@ -404,7 +404,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * ACL set. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithoutDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() @@ -415,7 +415,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithoutDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() @@ -456,7 +456,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() @@ -468,7 +468,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoConsumeWithDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() @@ -497,7 +497,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * ACL set. 
*/ @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testNoGroupAcl(quorum: String, groupProtocol: String): Unit = { val superuserAdminClient = createSuperuserAdminClient() superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values diff --git a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala index 74d870837dd4f..1929bc87e9afc 100644 --- a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala @@ -59,7 +59,7 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { */ @Timeout(15) @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) - @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696")) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) def testTwoConsumersWithDifferentSaslCredentials(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) consumerConfig.putIfAbsent(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index 45712240ecd6d..f4d6816da8cc4 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -576,8 +576,8 @@ object QuorumTestHarness { // The following parameter groups are to *temporarily* avoid bugs with the CONSUMER group protocol Consumer // implementation that would otherwise cause tests to fail. 
def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_16176: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly - def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17696: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17960: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17961: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_17964: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly + def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_KAFKA_18034: stream.Stream[Arguments] = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly } diff --git a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala index 1c65fd5073cd9..285560d382686 100644 --- a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala +++ b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala @@ -142,13 +142,24 @@ final class KafkaMetadataLogTest { // Test finding the first epoch log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, firstEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, firstEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(1, firstEpoch)).get().close() // Test finding the second epoch log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords, secondEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords - 1, secondEpoch)).get().close() - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, secondEpoch)).get().close() + } + + @Test + def testCreateSnapshotInMiddleOfBatch(): Unit = { + val numberOfRecords = 10 + val epoch = 1 + val log = buildMetadataLog(tempDir, mockTime) + + append(log, numberOfRecords, epoch) + log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) + + assertThrows( + classOf[IllegalArgumentException], + () => log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, epoch)) + ) } @Test @@ -206,7 +217,7 @@ final class KafkaMetadataLogTest { val snapshotId = new OffsetAndEpoch(numberOfRecords-4, epoch) val log = buildMetadataLog(tempDir, mockTime) - append(log, numberOfRecords, epoch) + (1 to numberOfRecords).foreach(_ => append(log, 1, epoch)) log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)) createNewSnapshot(log, snapshotId) @@ -282,7 +293,7 @@ final class KafkaMetadataLogTest { def testCreateExistingSnapshot(): Unit = { val numberOfRecords = 10 val epoch = 1 - val snapshotId = new OffsetAndEpoch(numberOfRecords - 1, epoch) + val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch) val log = buildMetadataLog(tempDir, mockTime) append(log, numberOfRecords, epoch) diff --git a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala index 31821ceb3a35d..666557fe15c57 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala @@ -486,6 +486,121 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) 
extends GroupCoord ) } + /** + * The test method checks the following scenario: + * 1. Creating a classic group with member 1, whose assignment has non-empty user data. + * 2. Member 2 using consumer protocol joins. The group cannot be upgraded and the join is + * rejected. + * 3. Member 1 leaves. + * 4. Member 2 using consumer protocol joins. The group is upgraded. + */ + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") + ) + ) + def testOnlineMigrationWithNonEmptyUserDataInAssignment(): Unit = { + // Creates the __consumer_offsets topics because it won't be created automatically + // in this test because it does not use FindCoordinator API. + createOffsetsTopic() + + // Create the topic. + createTopic( + topic = "foo", + numPartitions = 3 + ) + + // Classic member 1 joins the classic group. + val groupId = "grp" + + val memberId1 = joinDynamicConsumerGroupWithOldProtocol( + groupId = groupId, + metadata = metadata(List.empty), + assignment = assignment(List(0, 1, 2), ByteBuffer.allocate(1)) + )._1 + + // The joining request with a consumer group member 2 is rejected. + val errorMessage = consumerGroupHeartbeat( + groupId = groupId, + memberId = Uuid.randomUuid.toString, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + expectedError = Errors.GROUP_ID_NOT_FOUND + ).errorMessage + + assertEquals( + "Cannot upgrade classic group grp to consumer group because an unsupported custom assignor is in use. " + + "Please refer to the documentation or switch to a default assignor before re-attempting the upgrade.", + errorMessage + ) + + // The group is still a classic group. + assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ClassicGroupState.STABLE.toString) + .setGroupType(Group.GroupType.CLASSIC.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CLASSIC.toString) + ) + ) + + // Classic member 1 leaves the group. + leaveGroup( + groupId = groupId, + memberId = memberId1, + useNewProtocol = false, + version = ApiKeys.LEAVE_GROUP.latestVersion(isUnstableApiEnabled) + ) + + // Verify that the group is empty. + assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ClassicGroupState.EMPTY.toString) + .setGroupType(Group.GroupType.CLASSIC.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CLASSIC.toString) + ) + ) + + // The joining request with a consumer group member is accepted. + consumerGroupHeartbeat( + groupId = groupId, + memberId = Uuid.randomUuid.toString, + rebalanceTimeoutMs = 5 * 60 * 1000, + subscribedTopicNames = List("foo"), + topicPartitions = List.empty, + expectedError = Errors.NONE + ) + + // The group has become a consumer group. 
+ assertEquals( + List( + new ListGroupsResponseData.ListedGroup() + .setGroupId(groupId) + .setProtocolType("consumer") + .setGroupState(ConsumerGroupState.STABLE.toString) + .setGroupType(Group.GroupType.CONSUMER.toString) + ), + listGroups( + statesFilter = List.empty, + typesFilter = List(Group.GroupType.CONSUMER.toString) + ) + ) + } + private def testUpgradeFromEmptyClassicToConsumerGroup(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. @@ -1262,10 +1377,11 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).array } - private def assignment(assignedPartitions: List[Int]): Array[Byte] = { + private def assignment(assignedPartitions: List[Int], userData: ByteBuffer = null): Array[Byte] = { ConsumerProtocol.serializeAssignment( new ConsumerPartitionAssignor.Assignment( - assignedPartitions.map(new TopicPartition("foo", _)).asJava + assignedPartitions.map(new TopicPartition("foo", _)).asJava, + userData ) ).array } diff --git a/docs/streams/developer-guide/config-streams.html b/docs/streams/developer-guide/config-streams.html index bd9452827ae96..6233839014907 100644 --- a/docs/streams/developer-guide/config-streams.html +++ b/docs/streams/developer-guide/config-streams.html @@ -84,6 +84,7 @@
  • probing.rebalance.interval.ms
  • processing.exception.handler
  • processing.guarantee
  • +
  • processor.wrapper.class
  • production.exception.handler
  • rack.aware.assignment.non_overlap_cost
  • rack.aware.assignment.strategy
  • @@ -408,76 +409,83 @@

    num.standby.replicas"exactly_once" (for EOS version 1) and "exactly_once_beta" (for EOS version 2, requires broker version 2.5+). See Processing Guarantee - production.exception.handler + processor.wrapper.class + Medium + A class or class name implementing the ProcessorWrapper interface. + Must be passed in when creating the topology, and will not be applied unless passed in to the appropriate constructor as a TopologyConfig. You should + use the StreamsBuilder#new(TopologyConfig) constructor for DSL applications, and the + Topology#new(TopologyConfig) constructor for PAPI applications. + + production.exception.handler Medium Exception handling class that implements the ProductionExceptionHandler interface. DefaultProductionExceptionHandler - poll.ms + poll.ms Low The amount of time in milliseconds to block waiting for input. 100 milliseconds - rack.aware.assignment.tags + rack.aware.assignment.tags Medium List of tag keys used to distribute standby replicas across Kafka Streams clients. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over clients with different tag values. the empty list - replication.factor + replication.factor Medium The replication factor for changelog topics and repartition topics created by the application. The default of -1 (meaning: use broker default replication factor) requires broker version 2.4 or newer. -1 - retry.backoff.ms + retry.backoff.ms Medium The amount of time in milliseconds, before a request is retried. 100 - rocksdb.config.setter + rocksdb.config.setter Medium The RocksDB configuration. - state.cleanup.delay.ms + state.cleanup.delay.ms Low The amount of time in milliseconds to wait before deleting state when a partition has migrated. 600000 milliseconds (10 minutes) - state.dir + state.dir High Directory location for state stores. /${java.io.tmpdir}/kafka-streams - task.assignor.class + task.assignor.class Medium A task assignor class or class name implementing the TaskAssignor interface. The high-availability task assignor. - task.timeout.ms + task.timeout.ms Medium The maximum amount of time in milliseconds a task might stall due to internal errors and retries until an error is raised. For a timeout of 0 ms, a task would raise an error for the first internal error. For any timeout larger than 0 ms, a task will retry at least once before an error is raised. 300000 milliseconds (5 minutes) - topology.optimization + topology.optimization Medium A configuration telling Kafka Streams if it should optimize the topology and what optimizations to apply. Acceptable values are: StreamsConfig.NO_OPTIMIZATION (none), StreamsConfig.OPTIMIZE (all) or a comma separated list of specific optimizations: StreamsConfig.REUSE_KTABLE_SOURCE_TOPICS (reuse.ktable.source.topics), StreamsConfig.MERGE_REPARTITION_TOPICS (merge.repartition.topics), StreamsConfig.SINGLE_STORE_SELF_JOIN (single.store.self.join). NO_OPTIMIZATION - upgrade.from + upgrade.from Medium The version you are upgrading from during a rolling upgrade. See Upgrade From - windowstore.changelog.additional.retention.ms + windowstore.changelog.additional.retention.ms Low Added to a windows maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift. 86400000 milliseconds (1 day) - window.size.ms + window.size.ms Low Sets window size for the deserializer in order to calculate window end times. null @@ -998,6 +1006,23 @@

    probing.rebalance.interval.ms +
    +

    processor.wrapper.class

    +
    +
    +

    + A class or class name implementing the ProcessorWrapper interface. This feature allows you to wrap any of the + processors in the compiled topology, including both custom processor implementations and those created by Streams for DSL operators. This can be useful for logging or tracing + implementations since it allows access to the otherwise-hidden processor context for DSL operators, and also allows for injecting additional debugging information to an entire + application topology with just a single config. +

    +

    + IMPORTANT: This MUST be passed in when creating the topology, and will not be applied unless passed in to the appropriate topology-building constructor. You should + use the StreamsBuilder#new(TopologyConfig) constructor for DSL applications, and the + Topology#new(TopologyConfig) constructor for PAPI applications. +

    +
    +
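As a rough sketch of the wiring described above (the application id, bootstrap servers, and the com.example.MyTracingWrapper class name are placeholders, not part of Kafka), a DSL application could pass the wrapper in through a TopologyConfig as follows:

import java.util.Properties;

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyConfig;

public class ProcessorWrapperConfigSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wrapper-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Placeholder class name: substitute your own ProcessorWrapper implementation.
        props.put(StreamsConfig.PROCESSOR_WRAPPER_CLASS_CONFIG, "com.example.MyTracingWrapper");

        // The config only takes effect if it reaches the topology-building phase,
        // hence the TopologyConfig-accepting constructor rather than a plain new StreamsBuilder().
        final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(props)));
        builder.stream("input-topic").to("output-topic");

        // PAPI applications would pass the same TopologyConfig to the Topology(TopologyConfig) constructor.
    }
}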

    replication.factor

    diff --git a/docs/streams/upgrade-guide.html b/docs/streams/upgrade-guide.html index 9bbf752bef8de..8c1446af7da60 100644 --- a/docs/streams/upgrade-guide.html +++ b/docs/streams/upgrade-guide.html @@ -67,6 +67,12 @@

    Upgrade Guide and API Changes

    For a table that shows Streams API compatibility with Kafka broker versions, see Broker Compatibility.

    Notable compatibility changes in past releases

    + +

    + Starting in version 4.0.0, Kafka Streams will only be compatible when running against brokers on version 2.1 + or higher. Additionally, exactly-once semantics (EOS) will require brokers to be at least version 2.5. +

    +
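For reference, a minimal client-side sketch of opting into exactly-once v2 (the application id and bootstrap servers are placeholder values); the brokers themselves must all be on version 2.5 or newer for this to work:

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class EosV2ConfigSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Requires every broker in the cluster to be on version 2.5 or newer.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
    }
}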

    Downgrading from 3.5.x or newer version to 3.4.x or older version needs special attention: Since 3.5.0 release, Kafka Streams uses a new serialization format for repartition topics. @@ -155,6 +161,18 @@

    Streams API TransformerSupplier, ValueTransformer, and ValueTransformerSupplier.

    +

+ You can now configure your topology with a ProcessorWrapper, which allows you to access and optionally wrap/replace + any processor in the topology by injecting an alternative ProcessorSupplier in its place. This can be used to peek at + records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to + aid in testing or debugging scenarios. You must implement the ProcessorWrapper interface and then pass the class + or class name into the configs via the new StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG config. NOTE: this config is + applied during the topology-building phase, and therefore will not take effect unless the config is passed in when creating + the StreamsBuilder (DSL) or Topology (PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that + accepts a TopologyConfig parameter for the StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG to be picked up. + See KIP-1112 for more details. +

    +
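As an illustration only, a pass-through wrapper that merely logs each processor name while the topology is built might look like the sketch below. The package and the method and helper names (wrapProcessorSupplier, wrapFixedKeyProcessorSupplier, ProcessorWrapper.asWrapped, ProcessorWrapper.asWrappedFixedKey) are assumptions based on KIP-1112, so check the ProcessorWrapper javadocs for the exact signatures.

import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.ProcessorWrapper;
import org.apache.kafka.streams.processor.api.WrappedFixedKeyProcessorSupplier;
import org.apache.kafka.streams.processor.api.WrappedProcessorSupplier;

public class LoggingProcessorWrapper implements ProcessorWrapper {

    @Override
    public <KIn, VIn, KOut, VOut> WrappedProcessorSupplier<KIn, VIn, KOut, VOut> wrapProcessorSupplier(
            final String processorName,
            final ProcessorSupplier<KIn, VIn, KOut, VOut> processorSupplier) {
        // Observe the processor name, then hand the original supplier back unchanged.
        System.out.println("Wrapping processor: " + processorName);
        return ProcessorWrapper.asWrapped(processorSupplier);
    }

    @Override
    public <KIn, VIn, VOut> WrappedFixedKeyProcessorSupplier<KIn, VIn, VOut> wrapFixedKeyProcessorSupplier(
            final String processorName,
            final FixedKeyProcessorSupplier<KIn, VIn, VOut> processorSupplier) {
        // Same idea for the fixed-key processors used by value-only DSL operations.
        System.out.println("Wrapping fixed-key processor: " + processorName);
        return ProcessorWrapper.asWrappedFixedKey(processorSupplier);
    }
}

Such a class would then be registered via StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG, and it is only picked up when the config is passed through the TopologyConfig-accepting StreamsBuilder or Topology constructor, as described above.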

    Streams API changes in 3.9.0

    @@ -1634,7 +1652,7 @@

    - Kafka Broker (columns) + Kafka Broker (columns) @@ -1642,37 +1660,56 @@

    Kafka Streams API (rows) 0.10.0.x 0.10.1.x and 0.10.2.x - 0.11.0.x and
    1.0.x and
    1.1.x and
    2.0.x and
    2.1.x and
    2.2.x and
    2.3.x and
    2.4.x and
    2.5.x and
    2.6.x and
    2.7.x and
    2.8.x and
    3.0.x and
    3.1.x and
    3.2.x and
    3.3.x and
    3.4.x and
    3.5.x and
    3.6.x and
    3.7.x and
    3.8.x and
    3.9.x + 0.11.0.x and
    1.0.x and
    1.1.x and
    2.0.x + 2.1.x and
    2.2.x and
    2.3.x and
    2.4.x and
    2.5.x and
    2.6.x and
    2.7.x and
    2.8.x and
    3.0.x and
    3.1.x and
    3.2.x and
    3.3.x and
    3.4.x and
    3.5.x and
    3.6.x and
    3.7.x and
    3.8.x and
    3.9.x + 4.0.x 0.10.0.x compatible compatible compatible + compatible + 0.10.1.x and 0.10.2.x compatible compatible + compatible + 0.11.0.x compatible with exactly-once turned off
    (requires broker version 0.11.0.x or higher) compatible + 1.0.x and
    1.1.x and
    2.0.x and
    2.1.x and
    2.2.0 and
    2.2.0 compatible with exactly-once turned off
    (requires broker version 0.11.0.x or higher);
    requires message format 0.10 or higher;
    message headers are not supported
    (requires broker version 0.11.0.x or higher
    with message format 0.11 or higher) compatible; requires message format 0.10 or higher;
    if message headers are used, message format 0.11
    or higher required + compatible + 2.2.1 and
    2.3.x and
    2.4.x and
    2.5.x and
    2.6.x and
    2.7.x and
    2.8.x and
    3.0.x and
    3.1.x and
    3.2.x and
    3.3.x and
    3.4.x and
    3.5.x and
    3.6.x and
    3.7.x and
    3.8.x and
    3.9.x - compatible; requires message format 0.11 or higher;
    enabling exactly-once v2 requires 2.4.x or higher + compatible; requires message format 0.11 or higher;
    enabling exactly-once v2 requires 2.5.x or higher + compatible + + + + 4.0.x + + + + compatible; enabling exactly-once v2 requires broker version 2.5.x or higher + compatible diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle index 83f2c1a98fc36..6f65f6e874fb8 100644 --- a/gradle/dependencies.gradle +++ b/gradle/dependencies.gradle @@ -85,9 +85,6 @@ versions += [ jose4j: "0.9.4", junit: "5.10.2", jqwik: "1.8.3", - kafka_0100: "0.10.0.1", - kafka_0101: "0.10.1.1", - kafka_0102: "0.10.2.2", kafka_0110: "0.11.0.3", kafka_10: "1.0.2", kafka_11: "1.1.1", @@ -188,9 +185,6 @@ libs += [ junitPlatformLanucher: "org.junit.platform:junit-platform-launcher:$versions.junitPlatform", jqwik: "net.jqwik:jqwik:$versions.jqwik", hamcrest: "org.hamcrest:hamcrest:$versions.hamcrest", - kafkaStreams_0100: "org.apache.kafka:kafka-streams:$versions.kafka_0100", - kafkaStreams_0101: "org.apache.kafka:kafka-streams:$versions.kafka_0101", - kafkaStreams_0102: "org.apache.kafka:kafka-streams:$versions.kafka_0102", kafkaStreams_0110: "org.apache.kafka:kafka-streams:$versions.kafka_0110", kafkaStreams_10: "org.apache.kafka:kafka-streams:$versions.kafka_10", kafkaStreams_11: "org.apache.kafka:kafka-streams:$versions.kafka_11", diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java index 90d8db4d32013..dd0c6954088b4 100644 --- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java +++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java @@ -34,6 +34,7 @@ import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnreleasedInstanceIdException; import org.apache.kafka.common.errors.UnsupportedAssignorException; +import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; @@ -186,6 +187,7 @@ import static org.apache.kafka.coordinator.group.classic.ClassicGroupState.STABLE; import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME; import static org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics.CONSUMER_GROUP_REBALANCES_SENSOR_NAME; +import static org.apache.kafka.coordinator.group.modern.ModernGroupMember.hasAssignedPartitionsChanged; import static org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember.hasAssignedPartitionsChanged; /** @@ -674,7 +676,8 @@ ConsumerGroup getOrMaybeCreateConsumerGroup( } else { if (group.type() == CONSUMER) { return (ConsumerGroup) group; - } else if (createIfNotExists && group.type() == CLASSIC && validateOnlineUpgrade((ClassicGroup) group)) { + } else if (createIfNotExists && group.type() == CLASSIC) { + validateOnlineUpgrade((ClassicGroup) group); return convertToConsumerGroup((ClassicGroup) group, records); } else { throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.", groupId)); @@ -1033,23 +1036,28 @@ private void convertToClassicGroup( * Validates the online upgrade if the Classic Group receives a ConsumerGroupHeartbeat request. * * @param classicGroup A ClassicGroup. 
- * @return A boolean indicating whether it's valid to online upgrade the classic group. + * @throws GroupIdNotFoundException if the group cannot be upgraded. */ - private boolean validateOnlineUpgrade(ClassicGroup classicGroup) { + private void validateOnlineUpgrade(ClassicGroup classicGroup) { if (!config.consumerGroupMigrationPolicy().isUpgradeEnabled()) { - log.info("Cannot upgrade classic group {} to consumer group because the online upgrade is disabled.", + log.info("Cannot upgrade classic group {} to consumer group because online upgrade is disabled.", classicGroup.groupId()); - return false; + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because online upgrade is disabled.", classicGroup.groupId()) + ); } else if (!classicGroup.usesConsumerGroupProtocol()) { log.info("Cannot upgrade classic group {} to consumer group because the group does not use the consumer embedded protocol.", classicGroup.groupId()); - return false; + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because the group does not use the consumer embedded protocol.", classicGroup.groupId()) + ); } else if (classicGroup.numMembers() > config.consumerGroupMaxSize()) { log.info("Cannot upgrade classic group {} to consumer group because the group size exceeds the consumer group maximum size.", classicGroup.groupId()); - return false; + throw new GroupIdNotFoundException( + String.format("Cannot upgrade classic group %s to consumer group because the group size exceeds the consumer group maximum size.", classicGroup.groupId()) + ); } - return true; } /** @@ -1078,12 +1086,21 @@ ConsumerGroup convertToConsumerGroup(ClassicGroup classicGroup, List testConsumerGroupHeartbeatWithCustomAssignorClassicGroupSource() { + return Stream.of( + Arguments.of(null, true), + Arguments.of(ByteBuffer.allocate(0), true), + Arguments.of(ByteBuffer.allocate(1), false) + ); + } + + @ParameterizedTest + @MethodSource("testConsumerGroupHeartbeatWithCustomAssignorClassicGroupSource") + public void testConsumerGroupHeartbeatWithCustomAssignorClassicGroup(ByteBuffer userData, boolean expectUpgrade) { + String groupId = "group-id"; + String memberId1 = "member-id-1"; + String memberId2 = "member-id-2"; + Uuid fooTopicId = Uuid.randomUuid(); + String fooTopicName = "foo"; + Uuid barTopicId = Uuid.randomUuid(); + String barTopicName = "bar"; + + MockPartitionAssignor assignor = new MockPartitionAssignor("range"); + assignor.prepareGroupAssignment(new GroupAssignment(Map.of( + memberId1, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(fooTopicId, 0) + )), + memberId2, new MemberAssignmentImpl(mkAssignment( + mkTopicAssignment(barTopicId, 0) + )) + ))); + + MetadataImage metadataImage = new MetadataImageBuilder() + .addTopic(fooTopicId, fooTopicName, 1) + .addTopic(barTopicId, barTopicName, 1) + .addRacks() + .build(); + + GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, ConsumerGroupMigrationPolicy.UPGRADE.toString()) + .withConfig(GroupCoordinatorConfig.CONSUMER_GROUP_ASSIGNORS_CONFIG, List.of(assignor)) + .withMetadataImage(metadataImage) + .build(); + + JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1); + protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol() + .setName("range") + 
.setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription( + List.of(fooTopicName, barTopicName), + null, + List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(barTopicName, 0) + ) + )))) + ); + + Map assignments = Map.of( + memberId1, + Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(List.of( + new TopicPartition(fooTopicName, 0), + new TopicPartition(barTopicName, 0) + ), userData))) + ); + + // Create a stable classic group with member 1. + ClassicGroup group = context.createClassicGroup(groupId); + group.setProtocolName(Optional.of("range")); + group.add( + new ClassicGroupMember( + memberId1, + Optional.empty(), + "client-id", + "client-host", + 10000, + 5000, + "consumer", + protocols, + assignments.get(memberId1) + ) + ); + + group.transitionTo(PREPARING_REBALANCE); + group.transitionTo(COMPLETING_REBALANCE); + group.transitionTo(STABLE); + + context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); + context.commit(); + group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); + + // A new member 2 with new protocol joins the classic group, triggering the upgrade. + ConsumerGroupHeartbeatRequestData consumerGroupHeartbeatRequestData = + new ConsumerGroupHeartbeatRequestData() + .setGroupId(groupId) + .setMemberId(memberId2) + .setRebalanceTimeoutMs(5000) + .setServerAssignor("range") + .setSubscribedTopicNames(List.of(fooTopicName, barTopicName)) + .setTopicPartitions(Collections.emptyList()); + + if (expectUpgrade) { + context.consumerGroupHeartbeat(consumerGroupHeartbeatRequestData); + } else { + Exception ex = assertThrows(GroupIdNotFoundException.class, () -> context.consumerGroupHeartbeat(consumerGroupHeartbeatRequestData)); + assertEquals( + "Cannot upgrade classic group group-id to consumer group because an unsupported custom assignor is in use. 
" + + "Please refer to the documentation or switch to a default assignor before re-attempting the upgrade.", ex.getMessage()); + } + } + @Test public void testConsumerGroupHeartbeatToClassicGroupFromExistingStaticMember() { String groupId = "group-id"; diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java index 1fc43cd2e14f8..6ef692229af1b 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java @@ -1915,7 +1915,14 @@ public void testCreateSnapshotAsLeaderWithInvalidSnapshotId(boolean withKip853Rp // When leader creating snapshot: // 1.1 high watermark cannot be empty assertEquals(OptionalLong.empty(), context.client.highWatermark()); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId1, 0)); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId1, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=4, epoch=2)) greater than the high-watermark (0)", + exception.getMessage() + ); // 1.2 high watermark must larger than or equal to the snapshotId's endOffset context.advanceLocalLeaderHighWatermarkToLogEndOffset(); @@ -1927,18 +1934,52 @@ public void testCreateSnapshotAsLeaderWithInvalidSnapshotId(boolean withKip853Rp context.client.poll(); assertEquals(context.log.endOffset().offset(), context.client.highWatermark().getAsLong() + newRecords.size()); - OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 2, currentEpoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId2, 0)); + OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + newRecords.size(), currentEpoch); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId2, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=7, epoch=3)) greater than the high-watermark (4)", + exception.getMessage() + ); // 2 the quorum epoch must larger than or equal to the snapshotId's epoch OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong(), currentEpoch + 1); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId3, 0)); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId3, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=4, epoch=4)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=7, epoch=3))", + exception.getMessage() + ); // 3 the snapshotId should be validated against endOffsetForEpoch OffsetAndEpoch endOffsetForEpoch = context.log.endOffsetForEpoch(epoch); assertEquals(epoch, endOffsetForEpoch.epoch()); - OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 2, epoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId4, 0)); + OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 1, epoch); + exception = assertThrows( + IllegalArgumentException.class, + () -> 
context.client.createSnapshot(invalidSnapshotId4, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=4, epoch=2)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=3, epoch=2))", + exception.getMessage() + ); + + // 4 snapshotId offset must be at a batch boundary + context.advanceLocalLeaderHighWatermarkToLogEndOffset(); + OffsetAndEpoch invalidSnapshotId5 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() - 1, currentEpoch); + // this points to the "f" offset, which is not batch aligned + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId5, 0) + ); + assertEquals( + "Cannot create snapshot at offset (6) because it is not batch aligned. The batch containing the requested offset has a base offset of (4)", + exception.getMessage() + ); } @ParameterizedTest @@ -1951,6 +1992,7 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 Set voters = Set.of(localId, leaderId, otherFollowerId); RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters) + .appendToLog(1, List.of("a")) .withElectedLeader(epoch, leaderId) .withKip853Rpc(withKip853Rpc) .build(); @@ -1959,18 +2001,25 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 // When follower creating snapshot: // 1) The high watermark cannot be empty assertEquals(OptionalLong.empty(), context.client.highWatermark()); - OffsetAndEpoch invalidSnapshotId1 = new OffsetAndEpoch(1, 0); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId1, 0)); + OffsetAndEpoch invalidSnapshotId1 = new OffsetAndEpoch(1, 1); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId1, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=1, epoch=1)) greater than the high-watermark (0)", + exception.getMessage() + ); // Poll for our first fetch request context.pollUntilRequest(); RaftRequest.Outbound fetchRequest = context.assertSentFetchRequest(); assertTrue(voters.contains(fetchRequest.destination().id())); - context.assertFetchRequestData(fetchRequest, epoch, 0L, 0); + context.assertFetchRequestData(fetchRequest, epoch, 1L, 1); // The response does not advance the high watermark - List records1 = Arrays.asList("a", "b", "c"); - MemoryRecords batch1 = context.buildBatch(0L, 3, records1); + List records1 = Arrays.asList("b", "c"); + MemoryRecords batch1 = context.buildBatch(1L, 3, records1); context.deliverResponse( fetchRequest.correlationId(), fetchRequest.destination(), @@ -1981,11 +2030,14 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 // 2) The high watermark must be larger than or equal to the snapshotId's endOffset int currentEpoch = context.currentEpoch(); OffsetAndEpoch invalidSnapshotId2 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 1, currentEpoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId2, 0)); - - // 3) The quorum epoch must be larger than or equal to the snapshotId's epoch - OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong() + 1, currentEpoch + 1); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId3, 0)); + exception = assertThrows( + 
IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId2, 0) + ); + assertEquals( + "Cannot create a snapshot with an id (OffsetAndEpoch(offset=1, epoch=5)) greater than the high-watermark (0)", + exception.getMessage() + ); // The high watermark advances to be larger than log.endOffsetForEpoch(3), to test the case 3 context.pollUntilRequest(); @@ -1994,7 +2046,8 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 context.assertFetchRequestData(fetchRequest, epoch, 3L, 3); List records2 = Arrays.asList("d", "e", "f"); - MemoryRecords batch2 = context.buildBatch(3L, 4, records2); + int batch2Epoch = 4; + MemoryRecords batch2 = context.buildBatch(3L, batch2Epoch, records2); context.deliverResponse( fetchRequest.correlationId(), fetchRequest.destination(), @@ -2003,11 +2056,44 @@ public void testCreateSnapshotAsFollowerWithInvalidSnapshotId(boolean withKip853 context.client.poll(); assertEquals(6L, context.client.highWatermark().getAsLong()); + // 3) The quorum epoch must be larger than or equal to the snapshotId's epoch + OffsetAndEpoch invalidSnapshotId3 = new OffsetAndEpoch(context.client.highWatermark().getAsLong(), currentEpoch + 1); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId3, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=6, epoch=6)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=6, epoch=4))", + exception.getMessage() + ); + // 4) The snapshotId should be validated against endOffsetForEpoch OffsetAndEpoch endOffsetForEpoch = context.log.endOffsetForEpoch(3); assertEquals(3, endOffsetForEpoch.epoch()); - OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 1, epoch); - assertThrows(IllegalArgumentException.class, () -> context.client.createSnapshot(invalidSnapshotId4, 0)); + OffsetAndEpoch invalidSnapshotId4 = new OffsetAndEpoch(endOffsetForEpoch.offset() + 3, 3); + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId4, 0) + ); + assertEquals( + "Snapshot id (OffsetAndEpoch(offset=6, epoch=3)) is not valid according to the log: ValidOffsetAndEpoch(kind=DIVERGING, offsetAndEpoch=OffsetAndEpoch(offset=3, epoch=3))", + exception.getMessage() + ); + + // 5) The snapshotId should be batch-aligned + endOffsetForEpoch = context.log.endOffsetForEpoch(batch2Epoch); + assertEquals(4, endOffsetForEpoch.epoch()); + assertEquals(6, endOffsetForEpoch.offset()); + OffsetAndEpoch invalidSnapshotId5 = new OffsetAndEpoch(endOffsetForEpoch.offset() - 1, batch2Epoch); + // this points to the "f" offset, which is not batch aligned + exception = assertThrows( + IllegalArgumentException.class, + () -> context.client.createSnapshot(invalidSnapshotId5, 0) + ); + assertEquals( + "Cannot create snapshot at offset (5) because it is not batch aligned. 
The batch containing the requested offset has a base offset of (3)", + exception.getMessage() + ); } private static ReplicaKey replicaKey(int id, boolean withDirectoryId) { diff --git a/raft/src/test/java/org/apache/kafka/raft/MockLog.java b/raft/src/test/java/org/apache/kafka/raft/MockLog.java index 4695d18f72f3c..a7a8e89a88cfa 100644 --- a/raft/src/test/java/org/apache/kafka/raft/MockLog.java +++ b/raft/src/test/java/org/apache/kafka/raft/MockLog.java @@ -490,6 +490,18 @@ public Optional createNewSnapshot(OffsetAndEpoch snapshotId) ); } + long baseOffset = read(snapshotId.offset(), Isolation.COMMITTED).startOffsetMetadata.offset(); + if (snapshotId.offset() != baseOffset) { + throw new IllegalArgumentException( + String.format( + "Cannot create snapshot at offset (%s) because it is not batch aligned. " + + "The batch containing the requested offset has a base offset of (%s)", + snapshotId.offset(), + baseOffset + ) + ); + } + return createNewSnapshotUnchecked(snapshotId); } diff --git a/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java b/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java index 08e19866d9bd1..8306e103258e1 100644 --- a/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/MockLogTest.java @@ -450,13 +450,23 @@ public void testCreateSnapshotValidation() { // Test snapshot id for the first epoch log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, firstEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, firstEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(1, firstEpoch)).get().close(); // Test snapshot id for the second epoch log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords, secondEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords - 1, secondEpoch)).get().close(); - log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, secondEpoch)).get().close(); + } + + @Test + public void testCreateSnapshotInMiddleOfBatch() { + int numberOfRecords = 10; + int epoch = 1; + + appendBatch(numberOfRecords, epoch); + log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords)); + + assertThrows( + IllegalArgumentException.class, + () -> log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, epoch)) + ); } @Test diff --git a/settings.gradle b/settings.gradle index a2a7dacf1a2c9..dd76b7690251a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -84,9 +84,6 @@ include 'clients', 'streams:integration-tests', 'streams:streams-scala', 'streams:test-utils', - 'streams:upgrade-system-tests-0100', - 'streams:upgrade-system-tests-0101', - 'streams:upgrade-system-tests-0102', 'streams:upgrade-system-tests-0110', 'streams:upgrade-system-tests-10', 'streams:upgrade-system-tests-11', diff --git a/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java b/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java index c2502e31a488e..cce48cd0925ed 100644 --- a/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java +++ b/streams/src/main/java/org/apache/kafka/streams/internals/ApiUtils.java @@ -87,16 +87,4 @@ public static void checkSupplier(final Supplier supplier) { " %s#get() must return a new object each time it is called.", supplierClass, supplierClass)); } } - - /** - * @throws IllegalArgumentException if the same instance is obtained each time - */ - @SuppressWarnings("deprecation") - public static void checkSupplier(final 
org.apache.kafka.streams.kstream.ValueTransformerSupplier supplier) { - if (supplier.get() == supplier.get()) { - final String supplierClass = supplier.getClass().getName(); - throw new IllegalArgumentException(String.format("%s generates single reference." + - " %s#get() must return a new object each time it is called.", supplierClass, supplierClass)); - } - } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java index 5c8c5ef092d93..59ba928299e98 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/KStream.java @@ -431,8 +431,7 @@ KStream mapValues(final ValueMapperWithKey KStream flatMap(final KeyValueMapper>> mapper); @@ -481,8 +480,7 @@ KStream mapValues(final ValueMapperWithKey KStream flatMap(final KeyValueMapper>> mapper, final Named named); @@ -523,8 +521,7 @@ KStream flatMap(final KeyValueMapper KStream flatMapValues(final ValueMapper> mapper); @@ -565,8 +562,7 @@ KStream flatMap(final KeyValueMapper KStream flatMapValues(final ValueMapper> mapper, final Named named); @@ -612,8 +608,7 @@ KStream flatMapValues(final ValueMapper KStream flatMapValues(final ValueMapperWithKey> mapper); @@ -660,8 +655,7 @@ KStream flatMapValues(final ValueMapper KStream flatMapValues(final ValueMapperWithKey> mapper, final Named named); @@ -2972,491 +2966,6 @@ KStream leftJoin(final GlobalKTable globalTable, final KeyValueMapper keySelector, final ValueJoinerWithKey valueJoiner, final Named named); - /** - * Transform the value of each input record into zero or more new values (with possibly a new - * type) and emit for each new value a record with the same key of the input record and the value. - * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input - * record value and computes zero or more new values. - * Thus, an input record {@code } can be transformed into output records {@code , , ...}. - * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}). - * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()} - * but allows access to the {@code ProcessorContext} and record metadata. - * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI. - * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) Punctuator#punctuate()} - * the processing progress can be observed and additional periodic actions can be performed. - *

    - * In order for the transformer to use state stores, the stores must be added to the topology and connected to the - * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only - * access to global state stores is available by default). - *

    - * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)}, - * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer. - *

    {@code
    -     * // create store
    -     * StoreBuilder> keyValueStoreBuilder =
    -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                 Serdes.String(),
    -     *                 Serdes.String());
    -     * // add store
    -     * builder.addStateStore(keyValueStoreBuilder);
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerSupplier() {
    -     *     public ValueTransformer get() {
    -     *         return new MyValueTransformer();
    -     *     }
    -     * }, "myValueTransformState");
    -     * }
    - * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()}, - * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer. - *
    {@code
    -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
    -     *     // supply transformer
    -     *     ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     *
    -     *     // provide store(s) that will be added and connected to the associated transformer
    -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
    -     *     Set stores() {
    -     *         StoreBuilder> keyValueStoreBuilder =
    -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                   Serdes.String(),
    -     *                   Serdes.String());
    -     *         return Collections.singleton(keyValueStoreBuilder);
    -     *     }
    -     * }
    -     *
    -     * ...
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformer());
    -     * }
    - *

    - * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}. - * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()}, - * a schedule must be registered. - * The {@link ValueTransformer} must return an {@link java.lang.Iterable} type (e.g., any - * {@link java.util.Collection} type) in {@link ValueTransformer#transform(Object) - * transform()}. - * If the return value of {@link ValueTransformer#transform(Object) ValueTransformer#transform()} is an empty - * {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted. - * No additional {@link KeyValue} pairs can be emitted via - * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}. - * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to - * emit a {@link KeyValue} pair. - *

    {@code
    -     * class MyValueTransformer implements ValueTransformer {
    -     *     private StateStore state;
    -     *
    -     *     void init(ProcessorContext context) {
    -     *         this.state = context.getStateStore("myValueTransformState");
    -     *         // punctuate each second, can access this.state
    -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
    -     *     }
    -     *
    -     *     Iterable transform(V value) {
    -     *         // can access this.state
    -     *         List result = new ArrayList<>();
    -     *         for (int i = 0; i < 3; i++) {
    -     *             result.add(new NewValueType(value));
    -     *         }
    -     *         return result; // values
    -     *     }
    -     *
    -     *     void close() {
    -     *         // can access this.state
    -     *     }
    -     * }
    -     * }</pre>
    -     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
    -     * If repartitioning is required, a call to {@link #repartition()} should be performed before
    -     * {@code flatTransformValues()}.
    -     * <p>
    -     * Setting a new value preserves data co-location with respect to the key.
    -     * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join)
    -     * is applied to the result {@code KStream}.
    -     *
    -     * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer}
    -     *                                 The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object
    -     *                                 and returning the same object reference in {@link ValueTransformer} is a
    -     *                                 violation of the supplier pattern and leads to runtime exceptions.
    -     * @param stateStoreNames          the names of the state stores used by the processor; not required if the supplier
    -     *                                 implements {@link ConnectedStoreProvider#stores()}
    -     * @param <VR>                     the value type of the result stream
    -     * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of
    -     *         different type)
    -     * @see #mapValues(ValueMapper)
    -     * @see #mapValues(ValueMapperWithKey)
    -     * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead.
    -     */
    -    @Deprecated
    -    <VR> KStream<K, VR> flatTransformValues(final ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
    -                                            final String... stateStoreNames);
    -
    -    /**
    -     * Transform the value of each input record into zero or more new values (with possibly a new
    -     * type) and emit for each new value a record with the same key of the input record and the value.
    -     * A {@link ValueTransformer} (provided by the given {@link ValueTransformerSupplier}) is applied to each input
    -     * record value and computes zero or more new values.
    -     * Thus, an input record {@code <K,V>} can be transformed into output records {@code <K:V'>, <K:V''>, ...}.
    -     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #mapValues(ValueMapper) mapValues()}).
    -     * If you choose not to attach one, this operation is similar to the stateless {@link #mapValues(ValueMapper) mapValues()}
    -     * but allows access to the {@code ProcessorContext} and record metadata.
    -     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
    -     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) Punctuator#punctuate()}
    -     * the processing progress can be observed and additional periodic actions can be performed.
    -     * <p>
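The @deprecated tag above points to processValues(FixedKeyProcessorSupplier, String...). A minimal migration sketch for this overload, assuming an illustrative MyValueProcessor class and reusing the "myValueTransformState" store name from the deleted examples, might look like:

    // Migration sketch only: MyValueProcessor and "myValueTransformState" are illustrative names.
    import java.time.Duration;
    import org.apache.kafka.streams.processor.PunctuationType;
    import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
    import org.apache.kafka.streams.processor.api.FixedKeyRecord;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class MyValueProcessor extends ContextualFixedKeyProcessor<String, String, String> {
        private KeyValueStore<String, String> state;

        @Override
        public void init(final FixedKeyProcessorContext<String, String> context) {
            super.init(context);
            state = context.getStateStore("myValueTransformState");
            // punctuate each second, can access this.state (as in the deleted ValueTransformer example)
            context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { });
        }

        @Override
        public void process(final FixedKeyRecord<String, String> record) {
            // forward zero or more values for the unchanged key, like flatTransformValues() did
            for (int i = 0; i < 3; i++) {
                state.put(record.key(), record.value());
                context().forward(record.withValue(record.value() + "-" + i));
            }
        }
    }

    // usage, assuming the store was added via builder.addStateStore(...) as in the first strategy:
    // KStream<String, String> outputStream =
    //     inputStream.processValues(MyValueProcessor::new, "myValueTransformState");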
    -     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
    -     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
    -     * access to global state stores is available by default).
    -     * <p>
    -     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
    -     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
    -     * <pre>{@code
    -     * // create store
    -     * StoreBuilder> keyValueStoreBuilder =
    -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                 Serdes.String(),
    -     *                 Serdes.String());
    -     * // add store
    -     * builder.addStateStore(keyValueStoreBuilder);
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerSupplier() {
    -     *     public ValueTransformer get() {
    -     *         return new MyValueTransformer();
    -     *     }
    -     * }, "myValueTransformState");
    -     * }</pre>
    -     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
    -     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
    -     * <pre>{@code
    -     * class MyValueTransformerSupplier implements ValueTransformerSupplier {
    -     *     // supply transformer
    -     *     ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     *
    -     *     // provide store(s) that will be added and connected to the associated transformer
    -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
    -     *     Set stores() {
    -     *         StoreBuilder> keyValueStoreBuilder =
    -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                   Serdes.String(),
    -     *                   Serdes.String());
    -     *         return Collections.singleton(keyValueStoreBuilder);
    -     *     }
    -     * }
    -     *
    -     * ...
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformer());
    -     * }</pre>
    -     * <p>
    -     * With either strategy, within the {@link ValueTransformer}, the state is obtained via the {@link ProcessorContext}.
    -     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
    -     * a schedule must be registered.
    -     * The {@link ValueTransformer} must return an {@link java.lang.Iterable} type (e.g., any
    -     * {@link java.util.Collection} type) in {@link ValueTransformer#transform(Object)
    -     * transform()}.
    -     * If the return value of {@link ValueTransformer#transform(Object) ValueTransformer#transform()} is an empty
    -     * {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted.
    -     * No additional {@link KeyValue} pairs can be emitted via
    -     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
    -     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformer} tries to
    -     * emit a {@link KeyValue} pair.
    -     * <pre>{@code
    -     * class MyValueTransformer implements ValueTransformer {
    -     *     private StateStore state;
    -     *
    -     *     void init(ProcessorContext context) {
    -     *         this.state = context.getStateStore("myValueTransformState");
    -     *         // punctuate each second, can access this.state
    -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
    -     *     }
    -     *
    -     *     Iterable transform(V value) {
    -     *         // can access this.state
    -     *         List result = new ArrayList<>();
    -     *         for (int i = 0; i < 3; i++) {
    -     *             result.add(new NewValueType(value));
    -     *         }
    -     *         return result; // values
    -     *     }
    -     *
    -     *     void close() {
    -     *         // can access this.state
    -     *     }
    -     * }
    -     * }</pre>
    -     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
    -     * If repartitioning is required, a call to {@link #repartition()} should be performed before
    -     * {@code flatTransformValues()}.
    -     * <p>
    -     * Setting a new value preserves data co-location with respect to the key.
    -     * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join)
    -     * is applied to the result {@code KStream}.
    -     *
    -     * @param valueTransformerSupplier an instance of {@link ValueTransformerSupplier} that generates a newly constructed {@link ValueTransformer}
    -     *                                 The supplier should always generate a new instance. Creating a single {@link ValueTransformer} object
    -     *                                 and returning the same object reference in {@link ValueTransformer} is a
    -     *                                 violation of the supplier pattern and leads to runtime exceptions.
    -     * @param named                    a {@link Named} config used to name the processor in the topology
    -     * @param stateStoreNames          the names of the state stores used by the processor; not required if the supplier
    -     *                                 implements {@link ConnectedStoreProvider#stores()}
    -     * @param <VR>                     the value type of the result stream
    -     * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of
    -     *         different type)
    -     * @see #mapValues(ValueMapper)
    -     * @see #mapValues(ValueMapperWithKey)
    -     * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead.
    -     */
    -    @Deprecated
    -    <VR> KStream<K, VR> flatTransformValues(final ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
    -                                            final Named named,
    -                                            final String... stateStoreNames);
    -
    -    /**
    -     * Transform the value of each input record into zero or more new values (with possibly a new
    -     * type) and emit for each new value a record with the same key of the input record and the value.
    -     * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to
    -     * each input record value and computes zero or more new values.
    -     * Thus, an input record {@code <K,V>} can be transformed into output records {@code <K:V'>, <K:V''>, ...}.
    -     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}).
    -     * If you choose not to attach one, this operation is similar to the stateless {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}
    -     * but allows access to the {@code ProcessorContext} and record metadata.
    -     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
    -     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress can
    -     * be observed and additional periodic actions can be performed.
    -     * <p>
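For the "second strategy" described in the deleted Javadoc, the new API works the same way: FixedKeyProcessorSupplier also extends ConnectedStoreProvider, so the supplier can provide its own StoreBuilder and no store names are passed to processValues(). A minimal sketch, reusing the illustrative MyValueProcessor from the earlier sketch and the same illustrative store name:

    // Sketch only: MyValueProcessorSupplier and MyValueProcessor are illustrative names.
    import java.util.Collections;
    import java.util.Set;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
    import org.apache.kafka.streams.state.KeyValueStore;
    import org.apache.kafka.streams.state.StoreBuilder;
    import org.apache.kafka.streams.state.Stores;

    public class MyValueProcessorSupplier implements FixedKeyProcessorSupplier<String, String, String> {

        @Override
        public FixedKeyProcessor<String, String, String> get() {
            return new MyValueProcessor(); // a new instance on every call, as the supplier contract requires
        }

        @Override
        public Set<StoreBuilder<?>> stores() {
            // the store is added to the topology and connected to the processor automatically
            final StoreBuilder<KeyValueStore<String, String>> keyValueStoreBuilder =
                Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
                    Serdes.String(),
                    Serdes.String());
            return Collections.<StoreBuilder<?>>singleton(keyValueStoreBuilder);
        }
    }

    // usage:
    // KStream<String, String> outputStream = inputStream.processValues(new MyValueProcessorSupplier());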
    -     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
    -     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
    -     * access to global state stores is available by default).
    -     * <p>
    -     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
    -     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
    -     * <pre>{@code
    -     * // create store
    -     * StoreBuilder> keyValueStoreBuilder =
    -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                 Serdes.String(),
    -     *                 Serdes.String());
    -     * // add store
    -     * builder.addStateStore(keyValueStoreBuilder);
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerWithKeySupplier() {
    -     *     public ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     * }, "myValueTransformState");
    -     * }</pre>
    -     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
    -     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
    -     * <pre>{@code
    -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
    -     *     // supply transformer
    -     *     ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     *
    -     *     // provide store(s) that will be added and connected to the associated transformer
    -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
    -     *     Set stores() {
    -     *         StoreBuilder> keyValueStoreBuilder =
    -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                   Serdes.String(),
    -     *                   Serdes.String());
    -     *         return Collections.singleton(keyValueStoreBuilder);
    -     *     }
    -     * }
    -     *
    -     * ...
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerWithKey());
    -     * }</pre>
    -     * <p>
    -     * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}.
    -     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
    -     * a schedule must be registered.
    -     * The {@link ValueTransformerWithKey} must return an {@link java.lang.Iterable} type (e.g., any
    -     * {@link java.util.Collection} type) in {@link ValueTransformerWithKey#transform(Object, Object)
    -     * transform()}.
    -     * If the return value of {@link ValueTransformerWithKey#transform(Object, Object) ValueTransformerWithKey#transform()}
    -     * is an empty {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted.
    -     * No additional {@link KeyValue} pairs can be emitted via
    -     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
    -     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries
    -     * to emit a {@link KeyValue} pair.
    -     * <pre>{@code
    -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
    -     *     private StateStore state;
    -     *
    -     *     void init(ProcessorContext context) {
    -     *         this.state = context.getStateStore("myValueTransformState");
    -     *         // punctuate each second, can access this.state
    -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
    -     *     }
    -     *
    -     *     Iterable transform(K readOnlyKey, V value) {
    -     *         // can access this.state and use read-only key
    -     *         List result = new ArrayList<>();
    -     *         for (int i = 0; i < 3; i++) {
    -     *             result.add(new NewValueType(readOnlyKey));
    -     *         }
    -     *         return result; // values
    -     *     }
    -     *
    -     *     void close() {
    -     *         // can access this.state
    -     *     }
    -     * }
    -     * }</pre>
    -     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
    -     * If repartitioning is required, a call to {@link #repartition()} should be performed before
    -     * {@code flatTransformValues()}.
    -     * <p>
    -     * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning.
    -     * So, setting a new value preserves data co-location with respect to the key.
    -     * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join)
    -     * is applied to the result {@code KStream}.
    -     *
    -     * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey}
    -     *                                 The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object
    -     *                                 and returning the same object reference in {@link ValueTransformerWithKey} is a
    -     *                                 violation of the supplier pattern and leads to runtime exceptions.
    -     * @param stateStoreNames          the names of the state stores used by the processor; not required if the supplier
    -     *                                 implements {@link ConnectedStoreProvider#stores()}
    -     * @param <VR>                     the value type of the result stream
    -     * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of
    -     *         different type)
    -     * @see #mapValues(ValueMapper)
    -     * @see #mapValues(ValueMapperWithKey)
    -     * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, String...)} instead.
    -     */
    -    @Deprecated
    -    <VR> KStream<K, VR> flatTransformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, Iterable<VR>> valueTransformerSupplier,
    -                                            final String... stateStoreNames);
    -
    -    /**
    -     * Transform the value of each input record into zero or more new values (with possibly a new
    -     * type) and emit for each new value a record with the same key of the input record and the value.
    -     * A {@link ValueTransformerWithKey} (provided by the given {@link ValueTransformerWithKeySupplier}) is applied to
    -     * each input record value and computes zero or more new values.
    -     * Thus, an input record {@code <K,V>} can be transformed into output records {@code <K:V'>, <K:V''>, ...}.
    -     * Attaching a state store makes this a stateful record-by-record operation (cf. {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}).
    -     * If you choose not to attach one, this operation is similar to the stateless {@link #flatMapValues(ValueMapperWithKey) flatMapValues()}
    -     * but allows access to the {@code ProcessorContext} and record metadata.
    -     * This is essentially mixing the Processor API into the DSL, and provides all the functionality of the PAPI.
    -     * Furthermore, via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long)} the processing progress can
    -     * be observed and additional periodic actions can be performed.
    -     * <p>
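The ValueTransformerWithKey overloads removed here map to the same processValues() replacement: the key stays read-only and is available via FixedKeyRecord#key(). A minimal sketch with an illustrative MyKeyAwareValueProcessor class:

    // Sketch only: MyKeyAwareValueProcessor is an illustrative name.
    import org.apache.kafka.streams.processor.api.ContextualFixedKeyProcessor;
    import org.apache.kafka.streams.processor.api.FixedKeyRecord;

    public class MyKeyAwareValueProcessor extends ContextualFixedKeyProcessor<String, String, String> {

        @Override
        public void process(final FixedKeyRecord<String, String> record) {
            // the key cannot be changed, so co-location is preserved and no repartitioning is triggered downstream
            for (int i = 0; i < 3; i++) {
                context().forward(record.withValue(record.key() + "-" + i));
            }
        }
    }

    // usage:
    // KStream<String, String> outputStream = inputStream.processValues(MyKeyAwareValueProcessor::new);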
    -     * In order for the transformer to use state stores, the stores must be added to the topology and connected to the
    -     * transformer using at least one of two strategies (though it's not required to connect global state stores; read-only
    -     * access to global state stores is available by default).
    -     * <p>
    -     * The first strategy is to manually add the {@link StoreBuilder}s via {@link Topology#addStateStore(StoreBuilder, String...)},
    -     * and specify the store names via {@code stateStoreNames} so they will be connected to the transformer.
    -     * <pre>{@code
    -     * // create store
    -     * StoreBuilder> keyValueStoreBuilder =
    -     *         Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                 Serdes.String(),
    -     *                 Serdes.String());
    -     * // add store
    -     * builder.addStateStore(keyValueStoreBuilder);
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new ValueTransformerWithKeySupplier() {
    -     *     public ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     * }, "myValueTransformState");
    -     * }</pre>
    -     * The second strategy is for the given {@link ValueTransformerSupplier} to implement {@link ConnectedStoreProvider#stores()},
    -     * which provides the {@link StoreBuilder}s to be automatically added to the topology and connected to the transformer.
    -     * <pre>{@code
    -     * class MyValueTransformerWithKeySupplier implements ValueTransformerWithKeySupplier {
    -     *     // supply transformer
    -     *     ValueTransformerWithKey get() {
    -     *         return new MyValueTransformerWithKey();
    -     *     }
    -     *
    -     *     // provide store(s) that will be added and connected to the associated transformer
    -     *     // the store name from the builder ("myValueTransformState") is used to access the store later via the ProcessorContext
    -     *     Set stores() {
    -     *         StoreBuilder> keyValueStoreBuilder =
    -     *                   Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myValueTransformState"),
    -     *                   Serdes.String(),
    -     *                   Serdes.String());
    -     *         return Collections.singleton(keyValueStoreBuilder);
    -     *     }
    -     * }
    -     *
    -     * ...
    -     *
    -     * KStream outputStream = inputStream.flatTransformValues(new MyValueTransformerWithKey());
    -     * }</pre>
    -     * <p>
    -     * With either strategy, within the {@link ValueTransformerWithKey}, the state is obtained via the {@link ProcessorContext}.
    -     * To trigger periodic actions via {@link org.apache.kafka.streams.processor.Punctuator#punctuate(long) punctuate()},
    -     * a schedule must be registered.
    -     * The {@link ValueTransformerWithKey} must return an {@link java.lang.Iterable} type (e.g., any
    -     * {@link java.util.Collection} type) in {@link ValueTransformerWithKey#transform(Object, Object)
    -     * transform()}.
    -     * If the return value of {@link ValueTransformerWithKey#transform(Object, Object) ValueTransformerWithKey#transform()}
    -     * is an empty {@link java.lang.Iterable Iterable} or {@code null}, no records are emitted.
    -     * No additional {@link KeyValue} pairs can be emitted via
    -     * {@link org.apache.kafka.streams.processor.ProcessorContext#forward(Object, Object) ProcessorContext.forward()}.
    -     * A {@link org.apache.kafka.streams.errors.StreamsException} is thrown if the {@link ValueTransformerWithKey} tries
    -     * to emit a {@link KeyValue} pair.
    -     * <pre>{@code
    -     * class MyValueTransformerWithKey implements ValueTransformerWithKey {
    -     *     private StateStore state;
    -     *
    -     *     void init(ProcessorContext context) {
    -     *         this.state = context.getStateStore("myValueTransformState");
    -     *         // punctuate each second, can access this.state
    -     *         context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, new Punctuator(..));
    -     *     }
    -     *
    -     *     Iterable transform(K readOnlyKey, V value) {
    -     *         // can access this.state and use read-only key
    -     *         List result = new ArrayList<>();
    -     *         for (int i = 0; i < 3; i++) {
    -     *             result.add(new NewValueType(readOnlyKey));
    -     *         }
    -     *         return result; // values
    -     *     }
    -     *
    -     *     void close() {
    -     *         // can access this.state
    -     *     }
    -     * }
    -     * }</pre>
    -     * Even if any upstream operation was key-changing, no auto-repartition is triggered.
    -     * If repartitioning is required, a call to {@link #repartition()} should be performed before
    -     * {@code flatTransformValues()}.
    -     * <p>
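For the Named overloads, processValues(FixedKeyProcessorSupplier, Named, String...) is the replacement named in the @deprecated tags. A small illustrative sketch; the topic names, the processor name, and MyKeyAwareValueProcessor (from the sketch above) are assumptions:

    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.Named;

    public class NamedProcessValuesExample {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> input = builder.stream("input-topic");
            // the Named overload replaces flatTransformValues(..., Named, ...): the processor gets an explicit name
            final KStream<String, String> output =
                input.processValues(MyKeyAwareValueProcessor::new, Named.as("my-value-processor"));
            output.to("output-topic");
        }
    }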
    - * Note that the key is read-only and should not be modified, as this can lead to corrupt partitioning. - * So, setting a new value preserves data co-location with respect to the key. - * Thus, no internal data redistribution is required if a key based operator (like an aggregation or join) - * is applied to the result {@code KStream}. - * - * @param valueTransformerSupplier an instance of {@link ValueTransformerWithKeySupplier} that generates a newly constructed {@link ValueTransformerWithKey} - * The supplier should always generate a new instance. Creating a single {@link ValueTransformerWithKey} object - * and returning the same object reference in {@link ValueTransformerWithKey} is a - * violation of the supplier pattern and leads to runtime exceptions. - * @param named a {@link Named} config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor; not required if the supplier - * implements {@link ConnectedStoreProvider#stores()} - * @param the value type of the result stream - * @return a {@code KStream} that contains more or less records with unmodified key and new values (possibly of - * different type) - * @see #mapValues(ValueMapper) - * @see #mapValues(ValueMapperWithKey) - * @deprecated Since 3.3. Use {@link KStream#processValues(FixedKeyProcessorSupplier, Named, String...)} instead. - */ - @Deprecated - KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames); /** * Process all records in this stream, one record at a time, by applying a diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java index 2483cbbbe1673..91a93d23a07e0 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/AbstractStream.java @@ -17,17 +17,12 @@ package org.apache.kafka.streams.kstream.internals; import org.apache.kafka.common.serialization.Serde; -import org.apache.kafka.streams.internals.ApiUtils; import org.apache.kafka.streams.kstream.ValueJoiner; import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; -import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.state.StoreBuilder; import java.util.Collection; import java.util.HashSet; @@ -109,40 +104,6 @@ static ValueMapperWithKey withKey(final ValueMapper return (readOnlyKey, value) -> valueMapper.apply(value); } - @SuppressWarnings("deprecation") - static ValueTransformerWithKeySupplier toValueTransformerWithKeySupplier( - final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - ApiUtils.checkSupplier(valueTransformerSupplier); - return new ValueTransformerWithKeySupplier() { - @Override - public ValueTransformerWithKey get() { - final org.apache.kafka.streams.kstream.ValueTransformer 
valueTransformer = valueTransformerSupplier.get(); - return new ValueTransformerWithKey() { - @Override - public void init(final ProcessorContext context) { - valueTransformer.init(context); - } - - @Override - public VR transform(final K readOnlyKey, final V value) { - return valueTransformer.transform(value); - } - - @Override - public void close() { - valueTransformer.close(); - } - }; - } - - @Override - public Set> stores() { - return valueTransformerSupplier.stores(); - } - }; - } - static ValueJoinerWithKey toValueJoinerWithKey(final ValueJoiner valueJoiner) { Objects.requireNonNull(valueJoiner, "joiner can't be null"); return (readOnlyKey, value1, value2) -> valueJoiner.apply(value1, value2); diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java deleted file mode 100644 index 5ce059990630a..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransform.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.processor.api.ContextualProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Set; - -public class KStreamFlatTransform implements ProcessorSupplier { - - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier; - - @SuppressWarnings("deprecation") - public KStreamFlatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier) { - this.transformerSupplier = transformerSupplier; - } - - @Override - public Processor get() { - return new KStreamFlatTransformProcessor<>(transformerSupplier.get()); - } - - @Override - public Set> stores() { - return transformerSupplier.stores(); - } - - public static class KStreamFlatTransformProcessor extends ContextualProcessor { - - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.Transformer>> transformer; - - @SuppressWarnings("deprecation") - public KStreamFlatTransformProcessor(final org.apache.kafka.streams.kstream.Transformer>> transformer) { - this.transformer = transformer; - } - - @Override - public void init(final ProcessorContext context) { - super.init(context); - transformer.init((InternalProcessorContext) context); - } - - @Override - public void process(final Record record) { - final Iterable> pairs = transformer.transform(record.key(), record.value()); - if (pairs != null) { - for (final KeyValue pair : pairs) { - context().forward(record.withKey(pair.key).withValue(pair.value)); - } - } - } - - @Override - public void close() { - transformer.close(); - } - } -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java deleted file mode 100644 index 5469c668dfee2..0000000000000 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValues.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.processor.api.ContextualProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.ProcessorContext; -import org.apache.kafka.streams.processor.api.ProcessorSupplier; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.state.StoreBuilder; - -import java.util.Set; - -public class KStreamFlatTransformValues implements ProcessorSupplier { - - private final ValueTransformerWithKeySupplier> valueTransformerSupplier; - - public KStreamFlatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerWithKeySupplier) { - this.valueTransformerSupplier = valueTransformerWithKeySupplier; - } - - @Override - public Processor get() { - return new KStreamFlatTransformValuesProcessor<>(valueTransformerSupplier.get()); - } - - @Override - public Set> stores() { - return valueTransformerSupplier.stores(); - } - - public static class KStreamFlatTransformValuesProcessor extends ContextualProcessor { - - private final ValueTransformerWithKey> valueTransformer; - - KStreamFlatTransformValuesProcessor(final ValueTransformerWithKey> valueTransformer) { - this.valueTransformer = valueTransformer; - } - - @Override - public void init(final ProcessorContext context) { - super.init(context); - valueTransformer.init(new ForwardingDisabledProcessorContext((InternalProcessorContext) context)); - } - - @Override - public void process(final Record record) { - final Iterable transformedValues = valueTransformer.transform(record.key(), record.value()); - if (transformedValues != null) { - for (final VOut transformedValue : transformedValues) { - context().forward(record.withValue(transformedValue)); - } - } - } - - @Override - public void close() { - super.close(); - valueTransformer.close(); - } - } - -} diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java index 820c31f29e43e..ab27cfc1ea19d 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamImpl.java @@ -41,7 +41,6 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.kstream.internals.graph.BaseRepartitionNode; import org.apache.kafka.streams.kstream.internals.graph.BaseRepartitionNode.BaseRepartitionNodeBuilder; import org.apache.kafka.streams.kstream.internals.graph.GraphNode; @@ -121,8 +120,6 @@ public class KStreamImpl extends AbstractStream implements KStream KStream doStreamTableJoin(final KTable table, builder); } - @Override - @Deprecated - public KStream flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier> valueTransformerSupplier, - final String... 
stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - NamedInternal.empty(), - stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues( - toValueTransformerWithKeySupplier(valueTransformerSupplier), - named, - stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues(valueTransformerSupplier, NamedInternal.empty(), stateStoreNames); - } - - @Override - @Deprecated - public KStream flatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerSupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); - return doFlatTransformValues(valueTransformerSupplier, named, stateStoreNames); - } - - private KStream doFlatTransformValues(final ValueTransformerWithKeySupplier> valueTransformerWithKeySupplier, - final Named named, - final String... stateStoreNames) { - Objects.requireNonNull(stateStoreNames, "stateStoreNames can't be a null array"); - for (final String stateStoreName : stateStoreNames) { - Objects.requireNonNull(stateStoreName, "stateStoreNames can't contain `null` as store name"); - } - ApiUtils.checkSupplier(valueTransformerWithKeySupplier); - - final String name = new NamedInternal(named).orElseGenerateWithPrefix(builder, TRANSFORMVALUES_NAME); - final StatefulProcessorNode transformNode = new StatefulProcessorNode<>( - name, - new ProcessorParameters<>(new KStreamFlatTransformValues<>(valueTransformerWithKeySupplier), name), - stateStoreNames); - transformNode.setValueChangingOperation(true); - - builder.addGraphNode(graphNode, transformNode); - - // cannot inherit value serde - return new KStreamImpl<>( - name, - keySerde, - null, - subTopologySourceNodes, - repartitionRequired, - transformNode, - builder); - } - @Override @Deprecated public void process(final org.apache.kafka.streams.processor.ProcessorSupplier processorSupplier, diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java b/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java index 108a7d7233bf3..ad3a834257d1b 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/ConnectedStoreProvider.java @@ -19,7 +19,6 @@ import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Named; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier; import org.apache.kafka.streams.state.StoreBuilder; @@ -92,11 +91,8 @@ * @see Topology#addProcessor(String, org.apache.kafka.streams.processor.api.ProcessorSupplier, String...) 
* @see KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, String...) * @see KStream#process(org.apache.kafka.streams.processor.api.ProcessorSupplier, Named, String...) - * @see KStream#processValues(FixedKeyProcessorSupplier, String...) - * @see KStream#flatTransformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, String...) - * @see KStream#flatTransformValues(org.apache.kafka.streams.kstream.ValueTransformerSupplier, Named, String...) - * @see KStream#flatTransformValues(ValueTransformerWithKeySupplier, String...) - * @see KStream#flatTransformValues(ValueTransformerWithKeySupplier, Named, String...) + * @see KStream#processValues(FixedKeyProcessorSupplier, String...) + * @see KStream#processValues(FixedKeyProcessorSupplier, Named, String...) */ public interface ConnectedStoreProvider { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java index 6c973e096fc27..1dffc4ebbd3ff 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreator.java @@ -119,7 +119,8 @@ private Producer producer() { } public void reInitializeProducer() { - streamsProducer.resetProducer(producer()); + if (!streamsProducer.isClosed()) + streamsProducer.resetProducer(producer()); } StreamsProducer streamsProducer() { diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java index 1048b5a2ecfdf..546186b2dbd15 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsProducer.java @@ -70,6 +70,7 @@ public class StreamsProducer { private Producer producer; private boolean transactionInFlight = false; private boolean transactionInitialized = false; + private boolean closed = false; private double oldProducerTotalBlockedTime = 0; // we have a single `StreamsProducer` per thread, and thus a single `sendException` instance, // which we share across all tasks, ie, all `RecordCollectorImpl` @@ -98,6 +99,10 @@ boolean transactionInFlight() { return transactionInFlight; } + boolean isClosed() { + return closed; + } + /** * @throws IllegalStateException if EOS is disabled */ @@ -320,6 +325,7 @@ void flush() { void close() { producer.close(); + closed = true; transactionInFlight = false; transactionInitialized = false; } diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java index 0c9031afbdfc6..056721fa8af1d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/StreamsBuilderTest.java @@ -61,12 +61,11 @@ import org.apache.kafka.streams.state.internals.WrappedStateStore; import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper; import org.apache.kafka.streams.utils.TestUtils.RecordingProcessorWrapper.WrapperRecorder; +import org.apache.kafka.test.MockApiFixedKeyProcessorSupplier; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockMapper; import org.apache.kafka.test.MockPredicate; import org.apache.kafka.test.MockValueJoiner; -import 
org.apache.kafka.test.NoopValueTransformer; -import org.apache.kafka.test.NoopValueTransformerWithKey; import org.apache.kafka.test.StreamsTestUtils; import org.hamcrest.CoreMatchers; @@ -1315,29 +1314,21 @@ public void shouldUseSpecifiedNameForProcessOperation() { } @Test - public void shouldUseSpecifiedNameForPrintOperation() { - builder.stream(STREAM_TOPIC).print(Printed.toSysOut().withName("print-processor")); - builder.build(); - final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "print-processor"); - } + public void shouldUseSpecifiedNameForProcessValuesOperation() { + builder.stream(STREAM_TOPIC) + .processValues(new MockApiFixedKeyProcessorSupplier<>(), Named.as("test-fixed-key-processor")); - @Test - @SuppressWarnings("deprecation") - public void shouldUseSpecifiedNameForFlatTransformValueOperation() { - builder.stream(STREAM_TOPIC).flatTransformValues(() -> new NoopValueTransformer<>(), Named.as(STREAM_OPERATION_NAME)); builder.build(); final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); + assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "test-fixed-key-processor"); } @Test - @SuppressWarnings({"unchecked", "rawtypes", "deprecation"}) - public void shouldUseSpecifiedNameForFlatTransformValueWithKeyOperation() { - builder.stream(STREAM_TOPIC).flatTransformValues(() -> new NoopValueTransformerWithKey(), Named.as(STREAM_OPERATION_NAME)); + public void shouldUseSpecifiedNameForPrintOperation() { + builder.stream(STREAM_TOPIC).print(Printed.toSysOut().withName("print-processor")); builder.build(); final ProcessorTopology topology = builder.internalTopologyBuilder.rewriteTopology(new StreamsConfig(props)).buildTopology(); - assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", STREAM_OPERATION_NAME); + assertNamesForOperation(topology, "KSTREAM-SOURCE-0000000000", "print-processor"); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java index ca4fd756cbc1b..01e833f1b976b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java @@ -32,7 +32,6 @@ import org.apache.kafka.streams.processor.api.ProcessorSupplier; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.test.MockApiProcessorSupplier; -import org.apache.kafka.test.NoopValueTransformer; import org.apache.kafka.test.NoopValueTransformerWithKey; import org.junit.jupiter.api.Test; @@ -51,21 +50,6 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class AbstractStreamTest { - @SuppressWarnings("deprecation") - @Test - public void testToInternalValueTransformerSupplierSuppliesNewTransformers() { - final org.apache.kafka.streams.kstream.ValueTransformerSupplier valueTransformerSupplier = - mock(org.apache.kafka.streams.kstream.ValueTransformerSupplier.class); - when(valueTransformerSupplier.get()) - .thenReturn(new NoopValueTransformer<>()) - .thenReturn(new NoopValueTransformer<>()); - final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier = - 
AbstractStream.toValueTransformerWithKeySupplier(valueTransformerSupplier); - valueTransformerWithKeySupplier.get(); - valueTransformerWithKeySupplier.get(); - valueTransformerWithKeySupplier.get(); - } - @Test public void testToInternalValueTransformerWithKeySupplierSuppliesNewTransformers() { final ValueTransformerWithKeySupplier valueTransformerWithKeySupplier = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java deleted file mode 100644 index 5335128aa459d..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.KeyValue; -import org.apache.kafka.streams.kstream.internals.KStreamFlatTransform.KStreamFlatTransformProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentMatchers; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) -@SuppressWarnings("deprecation") -public class KStreamFlatTransformTest { - - private Number inputKey; - private Number inputValue; - - @Mock - private org.apache.kafka.streams.kstream.Transformer>> transformer; - @Mock - private InternalProcessorContext context; - private InOrder inOrder; - - private KStreamFlatTransformProcessor processor; - - @BeforeEach - public void setUp() { - inputKey = 1; - inputValue = 10; - inOrder = inOrder(context); - processor = new KStreamFlatTransformProcessor<>(transformer); - } - - @Test - public void shouldInitialiseFlatTransformProcessor() { - processor.init(context); - - verify(transformer).init(context); - } - - @Test - public void shouldTransformInputRecordToMultipleOutputRecords() { - final Iterable> outputRecords = Arrays.asList( 
- KeyValue.pair(2, 20), - KeyValue.pair(3, 30), - KeyValue.pair(4, 40)); - - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(outputRecords); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - for (final KeyValue outputRecord : outputRecords) { - inOrder.verify(context).forward(new Record<>(outputRecord.key, outputRecord.value, 0L)); - } - } - - @Test - public void shouldAllowEmptyListAsResultOfTransform() { - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(Collections.emptyList()); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldAllowNullAsResultOfTransform() { - processor.init(context); - - when(transformer.transform(inputKey, inputValue)).thenReturn(null); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldCloseFlatTransformProcessor() { - processor.close(); - - verify(transformer).close(); - } - - @Test - public void shouldGetFlatTransformProcessor() { - @SuppressWarnings("unchecked") - final org.apache.kafka.streams.kstream.TransformerSupplier>> transformerSupplier = - mock(org.apache.kafka.streams.kstream.TransformerSupplier.class); - final KStreamFlatTransform processorSupplier = - new KStreamFlatTransform<>(transformerSupplier); - - when(transformerSupplier.get()).thenReturn(transformer); - - final Processor processor = processorSupplier.get(); - - assertInstanceOf(KStreamFlatTransformProcessor.class, processor); - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java deleted file mode 100644 index 50a636f349db3..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamFlatTransformValuesTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.kstream.internals; - -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; -import org.apache.kafka.streams.kstream.internals.KStreamFlatTransformValues.KStreamFlatTransformValuesProcessor; -import org.apache.kafka.streams.processor.api.Processor; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.ForwardingDisabledProcessorContext; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentMatchers; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.mockito.junit.jupiter.MockitoSettings; -import org.mockito.quality.Strictness; - -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@ExtendWith(MockitoExtension.class) -@MockitoSettings(strictness = Strictness.STRICT_STUBS) -public class KStreamFlatTransformValuesTest { - - private Integer inputKey; - private Integer inputValue; - - @Mock - private ValueTransformerWithKey> valueTransformer; - @Mock - private InternalProcessorContext context; - private InOrder inOrder; - - private KStreamFlatTransformValuesProcessor processor; - - @BeforeEach - public void setUp() { - inputKey = 1; - inputValue = 10; - inOrder = inOrder(context); - processor = new KStreamFlatTransformValuesProcessor<>(valueTransformer); - } - - @Test - public void shouldInitializeFlatTransformValuesProcessor() { - processor.init(context); - - verify(valueTransformer).init(ArgumentMatchers.isA(ForwardingDisabledProcessorContext.class)); - } - - @Test - public void shouldTransformInputRecordToMultipleOutputValues() { - final Iterable outputValues = Arrays.asList( - "Hello", - "Blue", - "Planet"); - - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(outputValues); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - for (final String outputValue : outputValues) { - inOrder.verify(context).forward(new Record<>(inputKey, outputValue, 0L)); - } - } - - @Test - public void shouldEmitNoRecordIfTransformReturnsEmptyList() { - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(Collections.emptyList()); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldEmitNoRecordIfTransformReturnsNull() { - processor.init(context); - - when(valueTransformer.transform(inputKey, inputValue)).thenReturn(null); - - processor.process(new Record<>(inputKey, inputValue, 0L)); - - inOrder.verify(context, never()).forward(ArgumentMatchers.>any()); - } - - @Test - public void shouldCloseFlatTransformValuesProcessor() { - processor.close(); - - verify(valueTransformer).close(); - } - - @Test - public void shouldGetFlatTransformValuesProcessor() { - @SuppressWarnings("unchecked") - final ValueTransformerWithKeySupplier> valueTransformerSupplier = - mock(ValueTransformerWithKeySupplier.class); 
- final KStreamFlatTransformValues processorSupplier = - new KStreamFlatTransformValues<>(valueTransformerSupplier); - - when(valueTransformerSupplier.get()).thenReturn(valueTransformer); - - final Processor processor = processorSupplier.get(); - - assertInstanceOf(KStreamFlatTransformValuesProcessor.class, processor); - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java index b78696f259ade..8d2a280be4e8e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplTest.java @@ -48,8 +48,6 @@ import org.apache.kafka.streams.kstream.ValueJoinerWithKey; import org.apache.kafka.streams.kstream.ValueMapper; import org.apache.kafka.streams.kstream.ValueMapperWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKey; -import org.apache.kafka.streams.kstream.ValueTransformerWithKeySupplier; import org.apache.kafka.streams.processor.FailOnInvalidTimestamp; import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.TopicNameExtractor; @@ -113,33 +111,6 @@ public class KStreamImplTest { private final Consumed stringConsumed = Consumed.with(Serdes.String(), Serdes.String()); private final MockApiProcessorSupplier processorSupplier = new MockApiProcessorSupplier<>(); private final MockApiFixedKeyProcessorSupplier fixedKeyProcessorSupplier = new MockApiFixedKeyProcessorSupplier<>(); - @SuppressWarnings("deprecation") - private final org.apache.kafka.streams.kstream.ValueTransformerSupplier> flatValueTransformerSupplier = - () -> new org.apache.kafka.streams.kstream.ValueTransformer>() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public Iterable transform(final String value) { - return Collections.singleton(value); - } - - @Override - public void close() {} - }; - private final ValueTransformerWithKeySupplier> flatValueTransformerWithKeySupplier = - () -> new ValueTransformerWithKey>() { - @Override - public void init(final ProcessorContext context) {} - - @Override - public Iterable transform(final String key, final String value) { - return Collections.singleton(value); - } - - @Override - public void close() {} - }; private StreamsBuilder builder; private KStream testStream; @@ -1619,230 +1590,6 @@ public void shouldNotAllowBadProcessSupplierOnProcessValuesWithNamedAndStores() assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); } - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues((org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValues() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues((ValueTransformerWithKeySupplier>) null)); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void 
shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (ValueTransformerWithKeySupplier>) null, - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - Named.as("flatValueTransformer"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (ValueTransformerWithKeySupplier>) null, - Named.as("flatValueWithKeyTransformer"))); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (org.apache.kafka.streams.kstream.ValueTransformerSupplier>) null, - Named.as("flatValueTransformer"), - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValuesWithNamedAndStores() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - (ValueTransformerWithKeySupplier>) null, - Named.as("flatValueWitKeyTransformer"), - "stateStore")); - assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public 
void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueSupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - Named.as("flatValueTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNamesOnFlatTransformValuesWithFlatValueWithKeySupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - Named.as("flatValueWitKeyTransformer"), - (String[]) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueSupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - Named.as("flatValueTransformer"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueWithKeySupplierAndNamed() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - Named.as("flatValueWitKeyTransformer"), - (String) null)); - assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueSupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueWithKeySupplier() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (Named) null)); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueSupplierAndStores() { - 
final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerSupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - - @Test - @SuppressWarnings("deprecation") - public void shouldNotAllowNullNamedOnFlatTransformValuesWithFlatValueWithKeySupplierAndStore() { - final NullPointerException exception = assertThrows( - NullPointerException.class, - () -> testStream.flatTransformValues( - flatValueTransformerWithKeySupplier, - (Named) null, - "storeName")); - assertThat(exception.getMessage(), equalTo("named can't be null")); - } - @Test public void shouldNotAllowNullProcessSupplierOnProcess() { final NullPointerException exception = assertThrows( diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java index 6a4339a3ed78a..73da4b7cc050b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ActiveTaskCreatorTest.java @@ -190,9 +190,23 @@ public void shouldCloseIfEosV2Enabled() { activeTaskCreator.close(); + assertThat(activeTaskCreator.streamsProducer().isClosed(), is(true)); assertThat(mockClientSupplier.producers.get(0).closed(), is(true)); } + @Test + public void shouldNotReInitializeProducerOnClose() { + properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); + mockClientSupplier.setApplicationIdForProducer("appId"); + createTasks(); + + activeTaskCreator.streamsProducer().close(); + activeTaskCreator.reInitializeProducer(); + // If streamsProducer is not closed, clientSupplier will recreate a producer, + // resulting in more than one producer being created. 
+ assertThat(mockClientSupplier.producers.size(), is(1)); + } + // error handling @Test diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala index 80d43fc2315b2..9db92525ced22 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala @@ -17,14 +17,7 @@ package org.apache.kafka.streams.scala package kstream -import org.apache.kafka.streams.kstream.{ - GlobalKTable, - JoinWindows, - KStream => KStreamJ, - Printed, - ValueTransformerSupplier, - ValueTransformerWithKeySupplier -} +import org.apache.kafka.streams.kstream.{GlobalKTable, JoinWindows, KStream => KStreamJ, Printed} import org.apache.kafka.streams.processor.TopicNameExtractor import org.apache.kafka.streams.processor.api.{FixedKeyProcessorSupplier, ProcessorSupplier} import org.apache.kafka.streams.scala.FunctionsCompatConversions.{ @@ -35,9 +28,7 @@ import org.apache.kafka.streams.scala.FunctionsCompatConversions.{ MapperFromFunction, PredicateFromFunction, ValueMapperFromFunction, - ValueMapperWithKeyFromFunction, - ValueTransformerSupplierAsJava, - ValueTransformerSupplierWithKeyAsJava + ValueMapperWithKeyFromFunction } import scala.jdk.CollectionConverters._ @@ -492,98 +483,6 @@ class KStream[K, V](val inner: KStreamJ[K, V]) { def toTable(named: Named, materialized: Materialized[K, V, ByteArrayKeyValueStore]): KTable[K, V] = new KTable(inner.toTable(named, materialized)) - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, Iterable[VR]], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param valueTransformerSupplier a instance of `ValueTransformerSupplier` that generates a `ValueTransformer` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerSupplier[V, Iterable[VR]], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, named, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, Iterable[VR]], - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, stateStoreNames: _*)) - - /** - * Transform the value of each input record into zero or more records (with possible new type) in the - * output stream. - * A `ValueTransformer` (provided by the given `ValueTransformerSupplier`) is applied to each input - * record value and computes a new value for it. - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `ValueTransformer`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param valueTransformerSupplier a instance of `ValueTransformerWithKeySupplier` that generates a `ValueTransformerWithKey` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state stores used by the processor - * @return a [[KStream]] that contains records with unmodified key and new values (possibly of different type) - * @see `org.apache.kafka.streams.kstream.KStream#transformValues` - */ - @deprecated(since = "3.3", message = "Use processValues(FixedKeyProcessorSupplier, Named, String*) instead.") - def flatTransformValues[VR]( - valueTransformerSupplier: ValueTransformerWithKeySupplier[K, V, Iterable[VR]], - named: Named, - stateStoreNames: String* - ): KStream[K, VR] = - new KStream(inner.flatTransformValues[VR](valueTransformerSupplier.asJava, named, stateStoreNames: _*)) - /** * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given * `processorSupplier`). diff --git a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala index 88fe8e8980d15..6a0b6c1b0e988 100644 --- a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala +++ b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala @@ -18,17 +18,9 @@ package org.apache.kafka.streams.scala.kstream import java.time.Duration.ofSeconds import java.time.{Duration, Instant} -import org.apache.kafka.streams.kstream.{ - JoinWindows, - Named, - ValueTransformer, - ValueTransformerSupplier, - ValueTransformerWithKey, - ValueTransformerWithKeySupplier -} +import org.apache.kafka.streams.kstream.{JoinWindows, Named} import org.apache.kafka.streams.processor.api -import org.apache.kafka.streams.processor.ProcessorContext -import org.apache.kafka.streams.processor.api.{Processor, ProcessorSupplier} +import org.apache.kafka.streams.processor.api.{FixedKeyRecord, Processor, ProcessorSupplier} import org.apache.kafka.streams.scala.ImplicitConversions._ import org.apache.kafka.streams.scala.serialization.Serdes._ import org.apache.kafka.streams.scala.StreamsBuilder @@ -40,7 +32,6 @@ import org.junit.jupiter.api.Test import java.util import java.util.Collections -import scala.annotation.nowarn import scala.jdk.CollectionConverters._ class KStreamTest extends TestDriver { @@ -287,64 +278,29 @@ class KStreamTest extends TestDriver { testDriver.close() } - @nowarn - @Test - def testCorrectlyFlatTransformValuesInRecords(): Unit = { - class TestTransformer extends ValueTransformer[String, Iterable[String]] { - override def init(context: ProcessorContext): Unit = {} - - override def transform(value: String): Iterable[String] = - Array(s"$value-transformed") - - override def close(): Unit = {} - } - val builder = new StreamsBuilder() - val sourceTopic = "source" - val sinkTopic = "sink" - - val stream = builder.stream[String, String](sourceTopic) - stream - .flatTransformValues(new ValueTransformerSupplier[String, Iterable[String]] { - def get(): ValueTransformer[String, Iterable[String]] = - new TestTransformer - }) - .to(sinkTopic) - - val now = Instant.now() - val testDriver = createTestDriver(builder, now) - val testInput = testDriver.createInput[String, String](sourceTopic) - val testOutput = testDriver.createOutput[String, String](sinkTopic) - - testInput.pipeInput("1", "value", now) - - 
assertEquals("value-transformed", testOutput.readValue) - - assertTrue(testOutput.isEmpty) - - testDriver.close() - } - - @nowarn @Test - def testCorrectlyFlatTransformValuesInRecordsWithKey(): Unit = { - class TestTransformer extends ValueTransformerWithKey[String, String, Iterable[String]] { - override def init(context: ProcessorContext): Unit = {} - - override def transform(key: String, value: String): Iterable[String] = - Array(s"$value-transformed-$key") + def testProcessValuesCorrectlyRecords(): Unit = { + val processorSupplier: api.FixedKeyProcessorSupplier[String, String, String] = + () => + new api.FixedKeyProcessor[String, String, String] { + private var context: api.FixedKeyProcessorContext[String, String] = _ + + override def init(context: api.FixedKeyProcessorContext[String, String]): Unit = + this.context = context + + override def process(record: FixedKeyRecord[String, String]): Unit = { + val processedValue = s"${record.value()}-processed" + context.forward(record.withValue(processedValue)) + } + } - override def close(): Unit = {} - } val builder = new StreamsBuilder() val sourceTopic = "source" val sinkTopic = "sink" val stream = builder.stream[String, String](sourceTopic) stream - .flatTransformValues(new ValueTransformerWithKeySupplier[String, String, Iterable[String]] { - def get(): ValueTransformerWithKey[String, String, Iterable[String]] = - new TestTransformer - }) + .processValues(processorSupplier) .to(sinkTopic) val now = Instant.now() @@ -354,7 +310,9 @@ class KStreamTest extends TestDriver { testInput.pipeInput("1", "value", now) - assertEquals("value-transformed-1", testOutput.readValue) + val result = testOutput.readKeyValue() + assertEquals("value-processed", result.value) + assertEquals("1", result.key) assertTrue(testOutput.isEmpty) diff --git a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 27712cc5ace4e..0000000000000 --- a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? args[0] + " " : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.0)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - System.out.println("closing Kafka Streams instance"); - System.out.flush(); - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.0] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 1528b2c472bbb..0000000000000 --- a/streams/upgrade-system-tests-0100/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? args[0] : "")); - } - - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.0)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 
379720b956273..0000000000000 --- a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - /** - * This test cannot be executed, as long as Kafka 0.10.1.2 is not released - */ - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? 
args[0] + " " : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.1)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - System.out.println("closing Kafka Streams instance"); - System.out.flush(); - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.1] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 4efe70911abe5..0000000000000 --- a/streams/upgrade-system-tests-0101/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 2) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires two arguments (zookeeper-url, properties-file) but only " + args.length + " provided: " - + (args.length > 0 ? args[0] : "")); - } - final String zookeeper = args[0]; - final String propFileName = args[1]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.1)"); - System.out.println("zookeeper=" + zookeeper); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.setProperty(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeper); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java deleted file mode 100644 index 75e548439ceb5..0000000000000 --- a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; -import org.apache.kafka.streams.processor.AbstractProcessor; -import org.apache.kafka.streams.processor.Processor; -import org.apache.kafka.streams.processor.ProcessorContext; -import org.apache.kafka.streams.processor.ProcessorSupplier; - -import java.util.Properties; - -public class StreamsUpgradeTest { - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 1) { - System.err.println("StreamsUpgradeTest requires one argument (properties-file) but provided none"); - } - final String propFileName = args[0]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - - System.out.println("StreamsTest instance started (StreamsUpgradeTest v0.10.2)"); - System.out.println("props=" + streamsProperties); - - final KStreamBuilder builder = new KStreamBuilder(); - final KStream dataStream = builder.stream("data"); - dataStream.process(printProcessorSupplier()); - dataStream.to("echo"); - - final Properties config = new Properties(); - config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsUpgradeTest"); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final KafkaStreams streams = new KafkaStreams(builder, config); - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - streams.close(); - System.out.println("UPGRADE-TEST-CLIENT-CLOSED"); - System.out.flush(); - } - }); - } - - private static ProcessorSupplier printProcessorSupplier() { - return new ProcessorSupplier() { - public Processor get() { - return new AbstractProcessor() { - private int numRecordsProcessed = 0; - - @Override - public void init(final ProcessorContext context) { - System.out.println("[0.10.2] initializing processor: topic=data taskId=" + context.taskId()); - numRecordsProcessed = 0; - } - - @Override - public void process(final K key, final V value) { - numRecordsProcessed++; - if (numRecordsProcessed % 100 == 0) { - System.out.println("processed " + numRecordsProcessed + " records from topic=data"); - } - } - - @Override - public void punctuate(final long timestamp) {} - - @Override - public void close() {} - }; - } - }; - } -} diff --git a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java b/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java deleted file mode 100644 index 1cc115f3c061d..0000000000000 --- a/streams/upgrade-system-tests-0102/src/test/java/org/apache/kafka/streams/tests/StreamsUpgradeToCooperativeRebalanceTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.tests; - -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.kstream.ForeachAction; -import org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KStreamBuilder; - -import java.util.Properties; - -public class StreamsUpgradeToCooperativeRebalanceTest { - - - @SuppressWarnings("unchecked") - public static void main(final String[] args) throws Exception { - if (args.length < 1) { - System.err.println("StreamsUpgradeToCooperativeRebalanceTest requires one argument (properties-file) but none provided"); - } - final String propFileName = args[0]; - - final Properties streamsProperties = Utils.loadProps(propFileName); - final Properties config = new Properties(); - - System.out.println("StreamsTest instance started (StreamsUpgradeToCooperativeRebalanceTest v0.10.2)"); - System.out.println("props=" + config); - - config.put(StreamsConfig.APPLICATION_ID_CONFIG, "cooperative-rebalance-upgrade"); - config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); - config.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000L); - config.putAll(streamsProperties); - - final String sourceTopic = config.getProperty("source.topic", "source"); - final String sinkTopic = config.getProperty("sink.topic", "sink"); - final int reportInterval = Integer.parseInt(config.getProperty("report.interval", "100")); - final String upgradePhase = config.getProperty("upgrade.phase", ""); - - final KStreamBuilder builder = new KStreamBuilder(); - - final KStream upgradeStream = builder.stream(sourceTopic); - upgradeStream.foreach(new ForeachAction() { - int recordCounter = 0; - - @Override - public void apply(final String key, final String value) { - if (recordCounter++ % reportInterval == 0) { - System.out.printf("%sProcessed %d records so far%n", upgradePhase, recordCounter); - System.out.flush(); - } - } - } - ); - upgradeStream.to(sinkTopic); - - final KafkaStreams streams = new KafkaStreams(builder, config); - - - streams.start(); - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - streams.close(); - System.out.printf("%sCOOPERATIVE-REBALANCE-TEST-CLIENT-CLOSED%n", upgradePhase); - System.out.flush(); - })); - } -} diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/Agent.java b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/Agent.java index 25199f7a9b46e..0ef3c5e57454f 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/Agent.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/Agent.java @@ -41,9 +41,6 @@ import 
net.sourceforge.argparse4j.inf.ArgumentParserException; import net.sourceforge.argparse4j.inf.Namespace; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.PrintStream; import java.util.Set; @@ -55,8 +52,6 @@ * The agent process runs tasks. */ public final class Agent { - private static final Logger log = LoggerFactory.getLogger(Agent.class); - /** * The default Agent port. */ @@ -160,7 +155,7 @@ TaskSpec rebaseTaskSpecTime(TaskSpec spec) throws Exception { /** * Start a task on the agent, and block until it completes. * - * @param spec The task specifiction. + * @param spec The task specification. * @param out The output stream to print to. * * @return True if the task run successfully; false otherwise. @@ -248,16 +243,16 @@ public static void main(String[] args) throws Exception { JsonRestServer restServer = new JsonRestServer(Node.Util.getTrogdorAgentPort(platform.curNode())); AgentRestResource resource = new AgentRestResource(); - log.info("Starting agent process."); + System.out.println("Starting agent process."); final Agent agent = new Agent(platform, Scheduler.SYSTEM, restServer, resource); restServer.start(resource); Exit.addShutdownHook("agent-shutdown-hook", () -> { - log.warn("Running agent shutdown hook."); + System.out.println("Running agent shutdown hook."); try { agent.beginShutdown(); agent.waitForShutdown(); } catch (Exception e) { - log.error("Got exception while running agent shutdown hook.", e); + System.out.println("Got exception while running agent shutdown hook. " + e); } }); if (taskSpec != null) { diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/Coordinator.java b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/Coordinator.java index 9bf8a767cd0c5..b39969e7a9789 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/Coordinator.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/Coordinator.java @@ -38,9 +38,6 @@ import net.sourceforge.argparse4j.inf.ArgumentParserException; import net.sourceforge.argparse4j.inf.Namespace; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.ThreadLocalRandom; import static net.sourceforge.argparse4j.impl.Arguments.store; @@ -51,7 +48,6 @@ * The coordinator manages the agent processes in the cluster. */ public final class Coordinator { - private static final Logger log = LoggerFactory.getLogger(Coordinator.class); public static final int DEFAULT_PORT = 8889; @@ -169,17 +165,17 @@ public static void main(String[] args) throws Exception { JsonRestServer restServer = new JsonRestServer( Node.Util.getTrogdorCoordinatorPort(platform.curNode())); CoordinatorRestResource resource = new CoordinatorRestResource(); - log.info("Starting coordinator process."); + System.out.println("Starting coordinator process."); final Coordinator coordinator = new Coordinator(platform, Scheduler.SYSTEM, restServer, resource, ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE / 2)); restServer.start(resource); Exit.addShutdownHook("coordinator-shutdown-hook", () -> { - log.warn("Running coordinator shutdown hook."); + System.out.println("Running coordinator shutdown hook."); try { coordinator.beginShutdown(false); coordinator.waitForShutdown(); } catch (Exception e) { - log.error("Got exception while running coordinator shutdown hook.", e); + System.out.println("Got exception while running coordinator shutdown hook. " + e); } }); coordinator.waitForShutdown();
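
Editor's note (not part of the diff above): the deprecated flatTransformValues operators removed in this change point users at KStream#processValues with a FixedKeyProcessor, and the rewritten Scala test above shows that pattern for a single forwarded value. As a hedged illustration only, the following minimal Java sketch shows the same migration for the flat-mapping case, where one input value is forwarded several times; the topic names "source" and "sink", the class name, and the "-a"/"-b" suffixes are invented for the example and are not taken from this patch.

    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessor;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessorContext;
    import org.apache.kafka.streams.processor.api.FixedKeyProcessorSupplier;
    import org.apache.kafka.streams.processor.api.FixedKeyRecord;

    public class FlatTransformValuesMigrationExample {

        public static void main(final String[] args) {
            // Supplier declared explicitly, mirroring the Scala test in this diff.
            final FixedKeyProcessorSupplier<String, String, String> supplier =
                () -> new FixedKeyProcessor<String, String, String>() {
                    private FixedKeyProcessorContext<String, String> context;

                    @Override
                    public void init(final FixedKeyProcessorContext<String, String> context) {
                        this.context = context;
                    }

                    @Override
                    public void process(final FixedKeyRecord<String, String> record) {
                        // Forward zero or more value-only results per input record;
                        // the key cannot be changed, which is what flatTransformValues guaranteed.
                        context.forward(record.withValue(record.value() + "-a"));
                        context.forward(record.withValue(record.value() + "-b"));
                    }
                };

            final StreamsBuilder builder = new StreamsBuilder();
            final KStream<String, String> stream = builder.stream("source");
            stream.processValues(supplier).to("sink");
        }
    }

State stores, if any, would be attached exactly as before, by passing their names as the trailing varargs of processValues.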