diff --git a/.github/scripts/develocity_reports.py b/.github/scripts/develocity_reports.py new file mode 100644 index 0000000000000..38cf9c47d04c3 --- /dev/null +++ b/.github/scripts/develocity_reports.py @@ -0,0 +1,955 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import requests +import json +from dataclasses import dataclass, field, asdict +from typing import Dict, List, Tuple, Optional +from datetime import datetime, timedelta +import pytz # Add this import for timezone handling +from collections import defaultdict +import time +import logging +import concurrent.futures # Add this import at the top +import pathlib +import pickle +from abc import ABC, abstractmethod + +logger = logging.getLogger(__name__) + +@dataclass +class TestOutcome: + passed: int + failed: int + skipped: int + flaky: int + not_selected: int = field(metadata={'name': 'notSelected'}) + total: int + +@dataclass +class BuildInfo: + id: str + timestamp: datetime + duration: int + has_failed: bool + +@dataclass +class TestTimelineEntry: + build_id: str + timestamp: datetime + outcome: str # "passed", "failed", "flaky", etc. 
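For reference, a minimal sketch (not part of the patch) of how the dataclasses above are expected to be populated. It assumes an outcome-distribution payload shaped like the Develocity responses handled later in this script, where the camelCase `notSelected` key is renamed to `not_selected` before unpacking into `TestOutcome`; the numeric values and build ID below are hypothetical.

# Illustrative only: hypothetical outcome distribution as it might come back from the API.
raw_outcome = {'passed': 42, 'failed': 3, 'skipped': 1, 'flaky': 2, 'notSelected': 2, 'total': 50}

# The fetching code below performs this same camelCase -> snake_case rename before unpacking.
if 'notSelected' in raw_outcome:
    raw_outcome['not_selected'] = raw_outcome.pop('notSelected')

outcome = TestOutcome(**raw_outcome)

# A single timeline entry tied to a (hypothetical) build scan ID, as later attached to a test's timeline.
entry = TestTimelineEntry(
    build_id='hypothetical-build-id',
    timestamp=datetime.now(pytz.UTC),
    outcome='flaky'
)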
+ +@dataclass +class TestResult: + name: str + outcome_distribution: TestOutcome + first_seen: datetime + timeline: List[TestTimelineEntry] = field(default_factory=list) + recent_failure_rate: float = 0.0 # Added to track recent failure trends + +@dataclass +class TestContainerResult: + build_id: str + outcome: str + timestamp: Optional[datetime] = None + +@dataclass +class TestCaseResult(TestResult): + """Extends TestResult to include container-specific information""" + container_name: str = "" + +@dataclass +class BuildCache: + last_update: datetime + builds: Dict[str, 'BuildInfo'] + + def to_dict(self): + return { + 'last_update': self.last_update.isoformat(), + 'builds': {k: asdict(v) for k, v in self.builds.items()} + } + + @classmethod + def from_dict(cls, data: dict) -> 'BuildCache': + return cls( + last_update=datetime.fromisoformat(data['last_update']), + builds={k: BuildInfo(**v) for k, v in data['builds'].items()} + ) + +class CacheProvider(ABC): + @abstractmethod + def get_cache(self) -> Optional[BuildCache]: + pass + + @abstractmethod + def save_cache(self, cache: BuildCache): + pass + +class LocalCacheProvider(CacheProvider): + def __init__(self, cache_dir: str = None): + if cache_dir is None: + cache_dir = os.path.join(os.path.expanduser("~"), ".develocity_cache") + self.cache_file = os.path.join(cache_dir, "build_cache.pkl") + os.makedirs(cache_dir, exist_ok=True) + + def get_cache(self) -> Optional[BuildCache]: + try: + if os.path.exists(self.cache_file): + with open(self.cache_file, 'rb') as f: + return pickle.load(f) + except Exception as e: + logger.warning(f"Failed to load local cache: {e}") + return None + + def save_cache(self, cache: BuildCache): + try: + with open(self.cache_file, 'wb') as f: + pickle.dump(cache, f) + except Exception as e: + logger.warning(f"Failed to save local cache: {e}") + +class GitHubActionsCacheProvider(CacheProvider): + def __init__(self): + self.cache_key = "develocity-build-cache" + + def get_cache(self) -> Optional[BuildCache]: + try: + # Check if running in GitHub Actions + if not os.environ.get('GITHUB_ACTIONS'): + return None + + cache_path = os.environ.get('GITHUB_WORKSPACE', '') + cache_file = os.path.join(cache_path, self.cache_key + '.json') + + if os.path.exists(cache_file): + with open(cache_file, 'r') as f: + data = json.load(f) + return BuildCache.from_dict(data) + except Exception as e: + logger.warning(f"Failed to load GitHub Actions cache: {e}") + return None + + def save_cache(self, cache: BuildCache): + try: + if not os.environ.get('GITHUB_ACTIONS'): + return + + cache_path = os.environ.get('GITHUB_WORKSPACE', '') + cache_file = os.path.join(cache_path, self.cache_key + '.json') + + with open(cache_file, 'w') as f: + json.dump(cache.to_dict(), f) + except Exception as e: + logger.warning(f"Failed to save GitHub Actions cache: {e}") + +class TestAnalyzer: + def __init__(self, base_url: str, auth_token: str): + self.base_url = base_url + self.headers = { + 'Authorization': f'Bearer {auth_token}', + 'Accept': 'application/json' + } + self.default_chunk_size = timedelta(days=14) + self.api_retry_delay = 2 # seconds + self.max_api_retries = 3 + + # Initialize cache providers + self.cache_providers = [ + GitHubActionsCacheProvider(), + LocalCacheProvider() + ] + self.build_cache = None + self._load_cache() + + def _load_cache(self): + """Load cache from the first available provider""" + for provider in self.cache_providers: + cache = provider.get_cache() + if cache is not None: + self.build_cache = cache + logger.info(f"Loaded 
cache from {provider.__class__.__name__}") + return + logger.info("No existing cache found") + + def _save_cache(self): + """Save cache to all providers""" + if self.build_cache: + for provider in self.cache_providers: + provider.save_cache(self.build_cache) + logger.info(f"Saved cache to {provider.__class__.__name__}") + + def build_query(self, project: str, chunk_start: datetime, chunk_end: datetime, test_type: str) -> str: + """ + Constructs the query string to be used in both build info and test containers API calls. + + Args: + project: The project name. + chunk_start: The start datetime for the chunk. + chunk_end: The end datetime for the chunk. + test_type: The type of tests to query. + + Returns: + A formatted query string. + """ + return f'project:{project} buildStartTime:[{chunk_start.isoformat()} TO {chunk_end.isoformat()}] gradle.requestedTasks:{test_type}' + + def process_chunk( + self, + chunk_start: datetime, + chunk_end: datetime, + project: str, + test_type: str, + remaining_build_ids: set, + max_builds_per_request: int + ) -> Dict[str, BuildInfo]: + """Helper method to process a single chunk of build information""" + chunk_builds = {} + + # Use the helper method to build the query + query = self.build_query(project, chunk_start, chunk_end, test_type) + + # Initialize pagination for this chunk + from_build = None + continue_chunk = True + + while continue_chunk and remaining_build_ids: + query_params = { + 'query': query, + 'models': ['gradle-attributes'], + 'allModels': 'false', + 'maxBuilds': max_builds_per_request, + 'reverse': 'false', + 'fromInstant': int(chunk_start.timestamp() * 1000) + } + + if from_build: + query_params['fromBuild'] = from_build + + for attempt in range(self.max_api_retries): + try: + response = requests.get( + f'{self.base_url}/api/builds', + headers=self.headers, + params=query_params, + timeout=(5, 30) + ) + response.raise_for_status() + break + except requests.exceptions.Timeout: + if attempt == self.max_api_retries - 1: + raise + time.sleep(self.api_retry_delay * (attempt + 1)) + except requests.exceptions.RequestException: + raise + + response_json = response.json() + + if not response_json: + break + + for build in response_json: + build_id = build['id'] + + if 'models' in build and 'gradleAttributes' in build['models']: + gradle_attrs = build['models']['gradleAttributes'] + if 'model' in gradle_attrs: + attrs = gradle_attrs['model'] + build_timestamp = datetime.fromtimestamp(attrs['buildStartTime'] / 1000, pytz.UTC) + + if build_timestamp >= chunk_end: + continue_chunk = False + break + + if build_id in remaining_build_ids: + if 'problem' not in gradle_attrs: + chunk_builds[build_id] = BuildInfo( + id=build_id, + timestamp=build_timestamp, + duration=attrs.get('buildDuration'), + has_failed=attrs.get('hasFailed', False) + ) + + if continue_chunk and response_json: + from_build = response_json[-1]['id'] + else: + continue_chunk = False + + time.sleep(0.5) # Rate limiting between pagination requests + + return chunk_builds + + def get_build_info(self, build_ids: List[str], project: str, test_type: str, query_days: int) -> Dict[str, BuildInfo]: + builds = {} + max_builds_per_request = 100 + cutoff_date = datetime.now(pytz.UTC) - timedelta(days=query_days) + + # Get builds from cache if available + if self.build_cache: + cached_builds = self.build_cache.builds + cached_cutoff = self.build_cache.last_update - timedelta(days=query_days) + + # Use cached data for builds within the cache period + for build_id in build_ids: + if build_id in 
cached_builds: + build = cached_builds[build_id] + if build.timestamp >= cached_cutoff: + builds[build_id] = build + + # Update cutoff date to only fetch new data + cutoff_date = self.build_cache.last_update + logger.info(f"Using cached data up to {cutoff_date.isoformat()}") + + # Remove already found builds from the search list + build_ids = [bid for bid in build_ids if bid not in builds] + + if not build_ids: + logger.info("All builds found in cache") + return builds + + # Fetch remaining builds from API + remaining_build_ids = set(build_ids) + current_time = datetime.now(pytz.UTC) + chunk_size = self.default_chunk_size + + # Create time chunks + chunks = [] + chunk_start = cutoff_date + while chunk_start < current_time: + chunk_end = min(chunk_start + chunk_size, current_time) + chunks.append((chunk_start, chunk_end)) + chunk_start = chunk_end + + total_start_time = time.time() + + # Process chunks in parallel + with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: + future_to_chunk = { + executor.submit( + self.process_chunk, + chunk[0], + chunk[1], + project, + test_type, + remaining_build_ids.copy(), + max_builds_per_request + ): chunk for chunk in chunks + } + + for future in concurrent.futures.as_completed(future_to_chunk): + try: + chunk_builds = future.result() + builds.update(chunk_builds) + remaining_build_ids -= set(chunk_builds.keys()) + except Exception as e: + logger.error(f"Chunk processing generated an exception: {str(e)}") + + total_duration = time.time() - total_start_time + logger.info( + f"\nBuild Info Performance:" + f"\n Total Duration: {total_duration:.2f}s" + f"\n Builds Retrieved: {len(builds)}" + f"\n Builds Not Found: {len(remaining_build_ids)}" + ) + + # Update cache with new data + if builds: + if not self.build_cache: + self.build_cache = BuildCache(current_time, {}) + self.build_cache.builds.update(builds) + self.build_cache.last_update = current_time + self._save_cache() + + return builds + + def get_test_results(self, project: str, threshold_days: int, test_type: str = "quarantinedTest", + outcomes: List[str] = None) -> List[TestResult]: + """Fetch test results with timeline information""" + if outcomes is None: + outcomes = ["failed", "flaky"] + + logger.debug(f"Fetching test results for project {project}, last {threshold_days} days") + + end_time = datetime.now(pytz.UTC) + start_time = end_time - timedelta(days=threshold_days) + + all_results = {} + build_ids = set() + test_container_results = defaultdict(list) + + chunk_size = self.default_chunk_size + chunk_start = start_time + + while chunk_start < end_time: + chunk_end = min(chunk_start + chunk_size, end_time) + logger.debug(f"Processing chunk: {chunk_start} to {chunk_end}") + + # Use the helper method to build the query + query = self.build_query(project, chunk_start, chunk_end, test_type) + + query_params = { + 'query': query, + 'testOutcomes': outcomes, + 'container': '*', + 'include': ['buildScanIds'] # Explicitly request build scan IDs + } + + response = requests.get( + f'{self.base_url}/api/tests/containers', + headers=self.headers, + params=query_params + ) + response.raise_for_status() + + for test in response.json()['content']: + test_name = test['name'] + logger.debug(f"Processing test: {test_name}") + + if test_name not in all_results: + outcome_data = test['outcomeDistribution'] + if 'notSelected' in outcome_data: + outcome_data['not_selected'] = outcome_data.pop('notSelected') + outcome = TestOutcome(**outcome_data) + all_results[test_name] = TestResult(test_name, 
outcome, chunk_start) + + # Collect build IDs by outcome + if 'buildScanIdsByOutcome' in test: + scan_ids = test['buildScanIdsByOutcome'] + + for outcome, ids in scan_ids.items(): + if ids: # Only process if we have IDs + for build_id in ids: + build_ids.add(build_id) + test_container_results[test_name].append( + TestContainerResult(build_id=build_id, outcome=outcome) + ) + + chunk_start = chunk_end + + logger.debug(f"Total unique build IDs collected: {len(build_ids)}") + + # Fetch build information using the updated get_build_info method + builds = self.get_build_info(list(build_ids), project, test_type, threshold_days) + logger.debug(f"Retrieved {len(builds)} builds from API") + logger.debug(f"Retrieved build IDs: {sorted(builds.keys())}") + + # Update test results with timeline information + for test_name, result in all_results.items(): + logger.debug(f"\nProcessing timeline for test: {test_name}") + timeline = [] + for container_result in test_container_results[test_name]: + logger.debug(f"Processing container result: {container_result}") + if container_result.build_id in builds: + build_info = builds[container_result.build_id] + timeline.append(TestTimelineEntry( + build_id=container_result.build_id, + timestamp=build_info.timestamp, + outcome=container_result.outcome + )) + else: + logger.warning(f"Build ID {container_result.build_id} not found in builds response") + + # Sort timeline by timestamp + result.timeline = sorted(timeline, key=lambda x: x.timestamp) + logger.debug(f"Final timeline entries for {test_name}: {len(result.timeline)}") + + # Calculate recent failure rate + recent_cutoff = datetime.now(pytz.UTC) - timedelta(days=30) + recent_runs = [t for t in timeline if t.timestamp >= recent_cutoff] + if recent_runs: + recent_failures = sum(1 for t in recent_runs if t.outcome in ('failed', 'flaky')) + result.recent_failure_rate = recent_failures / len(recent_runs) + + return list(all_results.values()) + + def get_defective_tests(self, results: List[TestResult]) -> Dict[str, TestResult]: + """ + Analyze test results to find defective tests (failed or flaky) + """ + defective_tests = {} + + for result in results: + if result.outcome_distribution.failed > 0 or result.outcome_distribution.flaky > 0: + defective_tests[result.name] = result + + return defective_tests + + def get_long_quarantined_tests(self, results: List[TestResult], quarantine_threshold_days: int = 60) -> Dict[str, TestResult]: + """ + Find tests that have been quarantined longer than the threshold. + These are candidates for removal or rewriting. 
+ + Args: + results: List of test results + quarantine_threshold_days: Number of days after which a quarantined test should be considered for removal/rewrite + """ + long_quarantined = {} + current_time = datetime.now(pytz.UTC) + + for result in results: + days_quarantined = (current_time - result.first_seen).days + if days_quarantined >= quarantine_threshold_days: + long_quarantined[result.name] = (result, days_quarantined) + + return long_quarantined + + def get_problematic_quarantined_tests( + self, + results: List[TestResult], + quarantine_threshold_days: int = 60, + min_failure_rate: float = 0.3, + recent_failure_threshold: float = 0.5 + ) -> Dict[str, Dict]: + """Enhanced version that includes test case details""" + problematic_tests = {} + current_time = datetime.now(pytz.UTC) + chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases + + for result in results: + days_quarantined = (current_time - result.first_seen).days + if days_quarantined >= quarantine_threshold_days: + total_runs = result.outcome_distribution.total + if total_runs > 0: + problem_runs = result.outcome_distribution.failed + result.outcome_distribution.flaky + failure_rate = problem_runs / total_runs + + if failure_rate >= min_failure_rate or result.recent_failure_rate >= recent_failure_threshold: + # Get detailed test case information + try: + test_cases = self.get_test_case_details( + result.name, + "kafka", + chunk_start, + current_time, + test_type="quarantinedTest" + ) + + problematic_tests[result.name] = { + 'container_result': result, + 'days_quarantined': days_quarantined, + 'failure_rate': failure_rate, + 'recent_failure_rate': result.recent_failure_rate, + 'test_cases': test_cases + } + except Exception as e: + logger.error(f"Error getting test case details for {result.name}: {str(e)}") + + return problematic_tests + + def get_test_case_details( + self, + container_name: str, + project: str, + chunk_start: datetime, + chunk_end: datetime, + test_type: str = "quarantinedTest" + ) -> List[TestCaseResult]: + """ + Fetch detailed test case results for a specific container. 
+ + Args: + container_name: Name of the test container + project: The project name + chunk_start: Start time for the query + chunk_end: End time for the query + test_type: Type of tests to query (default: "quarantinedTest") + """ + # Use the helper method to build the query, similar to get_test_results + query = self.build_query(project, chunk_start, chunk_end, test_type) + + query_params = { + 'query': query, + 'testOutcomes': ['failed', 'flaky'], + 'container': container_name, + 'include': ['buildScanIds'], # Explicitly request build scan IDs + 'limit': 1000 + } + + try: + response = requests.get( + f'{self.base_url}/api/tests/cases', + headers=self.headers, + params=query_params + ) + response.raise_for_status() + + test_cases = [] + content = response.json().get('content', []) + + # Collect all build IDs first + build_ids = set() + for test in content: + if 'buildScanIdsByOutcome' in test: + for outcome_type, ids in test['buildScanIdsByOutcome'].items(): + build_ids.update(ids) + + # Get build info for all build IDs + builds = self.get_build_info(list(build_ids), project, test_type, 7) # 7 days for test cases + + for test in content: + outcome_data = test['outcomeDistribution'] + if 'notSelected' in outcome_data: + outcome_data['not_selected'] = outcome_data.pop('notSelected') + outcome = TestOutcome(**outcome_data) + + test_case = TestCaseResult( + name=test['name'], + outcome_distribution=outcome, + first_seen=chunk_start, + container_name=container_name + ) + + # Add build information with proper timestamps + if 'buildScanIdsByOutcome' in test: + for outcome_type, build_ids in test['buildScanIdsByOutcome'].items(): + for build_id in build_ids: + if build_id in builds: + build_info = builds[build_id] + test_case.timeline.append( + TestTimelineEntry( + build_id=build_id, + timestamp=build_info.timestamp, + outcome=outcome_type + ) + ) + else: + logger.warning(f"Build ID {build_id} not found for test case {test['name']}") + + # Sort timeline by timestamp + test_case.timeline.sort(key=lambda x: x.timestamp) + test_cases.append(test_case) + + return test_cases + + except requests.exceptions.RequestException as e: + logger.error(f"Error fetching test case details for {container_name}: {str(e)}") + raise + + def get_flaky_test_regressions(self, project: str, results: List[TestResult], + recent_days: int = 7, min_flaky_rate: float = 0.2) -> Dict[str, Dict]: + """ + Identify tests that have recently started showing flaky behavior. 
+ + Args: + project: The project name + results: List of test results + recent_days: Number of days to consider for recent behavior + min_flaky_rate: Minimum flaky rate to consider a test as problematic + """ + flaky_regressions = {} + current_time = datetime.now(pytz.UTC) + recent_cutoff = current_time - timedelta(days=recent_days) + + for result in results: + # Skip tests with no timeline data + if not result.timeline: + continue + + # Split timeline into recent and historical periods + recent_entries = [t for t in result.timeline if t.timestamp >= recent_cutoff] + historical_entries = [t for t in result.timeline if t.timestamp < recent_cutoff] + + if not recent_entries or not historical_entries: + continue + + # Calculate flaky rates + recent_flaky = sum(1 for t in recent_entries if t.outcome == 'flaky') + recent_total = len(recent_entries) + recent_flaky_rate = recent_flaky / recent_total if recent_total > 0 else 0 + + historical_flaky = sum(1 for t in historical_entries if t.outcome == 'flaky') + historical_total = len(historical_entries) + historical_flaky_rate = historical_flaky / historical_total if historical_total > 0 else 0 + + # Check if there's a significant increase in flakiness + if recent_flaky_rate >= min_flaky_rate and recent_flaky_rate > historical_flaky_rate * 1.5: + flaky_regressions[result.name] = { + 'result': result, + 'recent_flaky_rate': recent_flaky_rate, + 'historical_flaky_rate': historical_flaky_rate, + 'recent_executions': recent_entries, + 'historical_executions': historical_entries + } + + return flaky_regressions + + def get_cleared_tests(self, project: str, results: List[TestResult], + success_threshold: float = 0.7, min_executions: int = 5) -> Dict[str, Dict]: + """ + Identify quarantined tests that are consistently passing and could be cleared. 
+ + Args: + project: The project name + results: List of test results + success_threshold: Required percentage of successful builds to be considered cleared + min_executions: Minimum number of executions required to make a determination + """ + cleared_tests = {} + current_time = datetime.now(pytz.UTC) + + for result in results: + # Only consider tests with sufficient recent executions + recent_executions = result.timeline + if len(recent_executions) < min_executions: + continue + + # Calculate success rate + successful_runs = sum(1 for t in recent_executions + if t.outcome == 'passed') + success_rate = successful_runs / len(recent_executions) + + # Check if the test meets clearing criteria + if success_rate >= success_threshold: + # Verify no recent failures or flaky behavior + has_recent_issues = any(t.outcome in ['failed', 'flaky'] + for t in recent_executions[-min_executions:]) + + if not has_recent_issues: + cleared_tests[result.name] = { + 'result': result, + 'success_rate': success_rate, + 'total_executions': len(recent_executions), + 'successful_runs': successful_runs, + 'recent_executions': recent_executions[-min_executions:] + } + + return cleared_tests + +def print_summary(problematic_tests: Dict[str, Dict], flaky_regressions: Dict[str, Dict]): + """Print a summary of the most problematic tests at the top of the report""" + print("\nSummary of Most Problematic Tests") + print("=" * 50) + + # Combine and sort all test cases by failure rate + all_problem_cases = [] + + # Process problematic quarantined tests + for class_name, details in problematic_tests.items(): + for test_case in details['test_cases']: + total_runs = test_case.outcome_distribution.total + if total_runs > 0: + failure_rate = (test_case.outcome_distribution.failed + + test_case.outcome_distribution.flaky) / total_runs + all_problem_cases.append({ + 'class': class_name, + 'method': test_case.name.split('.')[-1], + 'failure_rate': failure_rate, + 'total_runs': total_runs + }) + + # Process flaky regressions + for test_name, details in flaky_regressions.items(): + all_problem_cases.append({ + 'class': test_name, + 'method': 'N/A', # Flaky regressions are at class level + 'failure_rate': details['recent_flaky_rate'], + 'total_runs': len(details['recent_executions']) + }) + + # Sort by failure rate descending + sorted_cases = sorted(all_problem_cases, + key=lambda x: x['failure_rate'], + reverse=True) + + # Group by class + by_class = {} + for case in sorted_cases: + if case['class'] not in by_class: + by_class[case['class']] = [] + by_class[case['class']].append(case) + + # Print summary + for class_name, cases in by_class.items(): + print(f"\n{class_name}") + for case in cases: + method = case['method'] + if method != 'N/A': + print(f" → {method:<60} {case['failure_rate']:.2%}") + else: + print(f" → Class-level flakiness rate: {case['failure_rate']:.2%}") + + print("\n" + "=" * 50) + +def main(): + # Configuration + BASE_URL = "https://ge.apache.org" + AUTH_TOKEN = os.environ.get("DEVELOCITY_ACCESS_TOKEN") + PROJECT = "kafka" + QUARANTINE_THRESHOLD_DAYS = 7 + MIN_FAILURE_RATE = 0.1 + RECENT_FAILURE_THRESHOLD = 0.5 + SUCCESS_THRESHOLD = 0.7 # For cleared tests + MIN_FLAKY_RATE = 0.2 # For flaky regressions + + analyzer = TestAnalyzer(BASE_URL, AUTH_TOKEN) + + try: + # Get quarantined test results + quarantined_results = analyzer.get_test_results( + PROJECT, + threshold_days=QUARANTINE_THRESHOLD_DAYS, + test_type="quarantinedTest" + ) + + # Get regular test results for flaky regression analysis + regular_results = 
analyzer.get_test_results( + PROJECT, + threshold_days=7, # Last 7 days for regular tests + test_type="test" + ) + + # Generate reports + problematic_tests = analyzer.get_problematic_quarantined_tests( + quarantined_results, + QUARANTINE_THRESHOLD_DAYS, + MIN_FAILURE_RATE, + RECENT_FAILURE_THRESHOLD + ) + + flaky_regressions = analyzer.get_flaky_test_regressions( + PROJECT, + regular_results, + recent_days=7, + min_flaky_rate=MIN_FLAKY_RATE + ) + + cleared_tests = analyzer.get_cleared_tests( + PROJECT, + quarantined_results, + success_threshold=SUCCESS_THRESHOLD + ) + + # Print summary first + print(f"\nTest Analysis Report ({datetime.now(pytz.UTC).strftime('%Y-%m-%d %H:%M:%S')} UTC)") + print("=" * 100) + print_summary(problematic_tests, flaky_regressions) + + # Then print detailed reports + print("\nDetailed Test Reports") + print("=" * 100) + + # Print Flaky Test Regressions + print("\nFlaky Test Regressions") + print("-" * 50) + if not flaky_regressions: + print("No flaky test regressions found.") + else: + for test_name, details in flaky_regressions.items(): + print(f"\n{test_name}") + print(f"Recent Flaky Rate: {details['recent_flaky_rate']:.2%}") + print(f"Historical Flaky Rate: {details['historical_flaky_rate']:.2%}") + print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):") + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp)[-5:]: + print(f" {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}") + + # Print Cleared Tests + print("\nCleared Tests (Ready for Unquarantine)") + print("-" * 50) + if not cleared_tests: + print("No tests ready to be cleared from quarantine.") + else: + for test_name, details in cleared_tests.items(): + print(f"\n{test_name}") + print(f"Success Rate: {details['success_rate']:.2%}") + print(f"Total Executions: {details['total_executions']}") + print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):") + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp): + print(f" {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}") + + # Print Defective Tests + print("\nHigh-Priority Quarantined Tests") + print("-" * 50) + if not problematic_tests: + print("No high-priority quarantined tests found.") + else: + sorted_tests = sorted( + problematic_tests.items(), + key=lambda x: (x[1]['failure_rate'], x[1]['days_quarantined']), + reverse=True + ) + + print(f"\nFound {len(sorted_tests)} high-priority quarantined test classes:") + for class_name, details in sorted_tests: + class_result = details['container_result'] + + print(f"\n{class_name}") + print("=" * len(class_name)) + print(f"Quarantined for {details['days_quarantined']} days") + print(f"Class Failure Rate: {details['failure_rate']:.2%}") + print(f"Recent Failure Rate: {details['recent_failure_rate']:.2%}") + print("\nClass Statistics:") + print(f" Total Runs: {class_result.outcome_distribution.total}") + print(f" Failed: {class_result.outcome_distribution.failed}") + print(f" Flaky: {class_result.outcome_distribution.flaky}") + print(f" Passed: {class_result.outcome_distribution.passed}") + + # Show class timeline + if class_result.timeline: + print(f"\nClass Recent Executions (last {min(5, len(class_result.timeline))} of {len(class_result.timeline)} runs):") + print(" Date/Time (UTC) Outcome Build ID") + print(" " + "-" * 48) + for entry in sorted(class_result.timeline, key=lambda x: x.timestamp)[-5:]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f" {date_str:<17} 
{entry.outcome:<10} {entry.build_id}") + + print("\nTest Methods (Last 7 Days):") + print(" " + "-" * 48) + + # Sort test methods by failure rate + sorted_methods = sorted( + details['test_cases'], + key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total if x.outcome_distribution.total > 0 else 0, + reverse=True + ) + + for test_method in sorted_methods: + total_runs = test_method.outcome_distribution.total + if total_runs > 0: + failure_rate = (test_method.outcome_distribution.failed + test_method.outcome_distribution.flaky) / total_runs + + # Extract the method name from the full test name + method_name = test_method.name.split('.')[-1] + + print(f"\n → {method_name}") + print(f" Failure Rate: {failure_rate:.2%}") + print(f" Runs: {total_runs:3d} | Failed: {test_method.outcome_distribution.failed:3d} | " + f"Flaky: {test_method.outcome_distribution.flaky:3d} | " + f"Passed: {test_method.outcome_distribution.passed:3d}") + + # Show test method timeline + if test_method.timeline: + print(f"\n Recent Executions (last {min(3, len(test_method.timeline))} of {len(test_method.timeline)} runs):") + print(" Date/Time (UTC) Outcome Build ID") + print(" " + "-" * 44) + for entry in sorted(test_method.timeline, key=lambda x: x.timestamp)[-3:]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f" {date_str:<17} {entry.outcome:<10} {entry.build_id}") + + print("\n" + "-" * 50) + + print("\n" + "=" * 100) + + except Exception as e: + logger.exception("Error occurred during report generation") + print(f"Error occurred: {str(e)}") + + +if __name__ == "__main__": + # Configure logging + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s' + ) + main() diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt index 900ce9a9b61bc..d59455f79dac6 100644 --- a/.github/scripts/requirements.txt +++ b/.github/scripts/requirements.txt @@ -13,3 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. PyYAML~=6.0 +pytz==2024.2 +requests==2.32.3 diff --git a/.github/workflows/docker_scan.yml b/.github/workflows/docker_scan.yml index 693b80c0626b6..a76916fffa916 100644 --- a/.github/workflows/docker_scan.yml +++ b/.github/workflows/docker_scan.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: # This is an array of supported tags. 
Make sure this array only contains the supported tags - supported_image_tag: ['latest', '3.7.1', "3.8.0", "3.8.1", "3.9.0"] + supported_image_tag: ['latest', '3.7.2', '3.8.1', '3.9.0'] steps: - name: Run CVE scan uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 diff --git a/.github/workflows/pr-reviewed.yml b/.github/workflows/pr-reviewed.yml index 2f6cae8a4fe97..ea2a35f58e8c4 100644 --- a/.github/workflows/pr-reviewed.yml +++ b/.github/workflows/pr-reviewed.yml @@ -40,6 +40,7 @@ jobs: name: pr-number.txt - name: Remove label uses: actions/github-script@v7 + continue-on-error: true with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/LICENSE-binary b/LICENSE-binary index 097185c9f02f1..8e5f7c14342ec 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -307,7 +307,6 @@ see: licenses/CDDL+GPL-1.1 javax.activation-api-1.2.0 javax.annotation-api-1.3.2 javax.servlet-api-3.1.0 -javax.ws.rs-api-2.1.1 jaxb-api-2.3.1 activation-1.1.1 diff --git a/build.gradle b/build.gradle index ef599d7a4e7d0..78bf91d799f3b 100644 --- a/build.gradle +++ b/build.gradle @@ -124,7 +124,7 @@ ext { def releaseVersion = modulesNeedingJava11.any { projectPath == it } ? minClientJavaVersion : minNonClientJavaVersion options.compilerArgs << "-encoding" << "UTF-8" - options.compilerArgs += ["--release", String.valueOf(releaseVersion)] + options.release = releaseVersion if (name in ["compileTestJava", "compileTestScala"]) { options.compilerArgs << "-parameters" @@ -1843,7 +1843,7 @@ project(':generator') { implementation libs.argparse4j implementation libs.jacksonDatabind implementation libs.jacksonJDK8Datatypes - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation 'org.eclipse.jgit:org.eclipse.jgit:6.4.0.202211300538-r' // SSH support for JGit based on Apache MINA sshd @@ -1890,7 +1890,7 @@ project(':clients') { compileOnly libs.jose4j // for SASL/OAUTHBEARER JWT validation; only used by broker testImplementation libs.bcpkix - testImplementation libs.jacksonJaxrsJsonProvider + testImplementation libs.jacksonJakartarsJsonProvider testImplementation libs.jacksonDatabindYaml testImplementation libs.jose4j testImplementation libs.junitJupiter @@ -2467,7 +2467,7 @@ project(':tools') { implementation libs.re2j implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider compileOnly libs.spotbugs @@ -2539,14 +2539,20 @@ project(':trogdor') { runtimeOnly libs.log4j2Core runtimeOnly libs.log4j1Bridge2Api - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation project(':group-coordinator') implementation project(':group-coordinator:group-coordinator-api') @@ -2599,7 +2605,7 @@ project(':shell') { implementation project(':raft') 
implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider testImplementation project(':clients') testImplementation project(':clients').sourceSets.test.output @@ -2932,11 +2938,7 @@ project(':streams:examples') { dependencies { // this dependency should be removed after we unify data API - implementation(project(':connect:json')) { - // this transitive dependency is not used in Streams, and it breaks SBT builds - exclude module: 'javax.ws.rs-api' - } - + implementation(project(':connect:json')) implementation project(':streams') implementation libs.slf4jLog4j2 @@ -3381,7 +3383,7 @@ project(':connect:api') { runtimeOnly libs.log4j2Api runtimeOnly libs.log4j2Core runtimeOnly libs.log4j1Bridge2Api - implementation libs.jaxrsApi + implementation libs.jakartaRsApi testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output @@ -3511,15 +3513,23 @@ project(':connect:runtime') { implementation libs.log4j1Bridge2Api implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation implementation libs.jacksonAnnotations - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets - implementation libs.jettyClient + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyClient) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation libs.classgraph implementation libs.mavenArtifact implementation libs.swaggerAnnotations @@ -3694,7 +3704,7 @@ project(':connect:basic-auth-extension') { runtimeOnly libs.log4j2Api runtimeOnly libs.log4j2Core runtimeOnly libs.log4j1Bridge2Api - implementation libs.jaxrsApi + implementation libs.jakartaRsApi implementation libs.jaxAnnotationApi testImplementation libs.bcpkix @@ -3742,15 +3752,23 @@ project(':connect:mirror') { runtimeOnly libs.log4j2Core runtimeOnly libs.log4j1Bridge2Api implementation libs.jacksonAnnotations - implementation libs.jacksonJaxrsJsonProvider + implementation libs.jacksonJakartarsJsonProvider implementation libs.jerseyContainerServlet implementation libs.jerseyHk2 implementation libs.jaxbApi // Jersey dependency that was available in the JDK before Java 9 implementation libs.activation // Jersey dependency that was available in the JDK before Java 9 - implementation libs.jettyServer - implementation libs.jettyServlet - implementation libs.jettyServlets - implementation libs.jettyClient + implementation (libs.jettyServer) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlet) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyServlets) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } + implementation (libs.jettyClient) { + exclude group: 'org.slf4j', module: 'slf4j-api' + } implementation libs.swaggerAnnotations testImplementation libs.junitJupiter diff --git 
a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 03614ede922a9..0b9e7dd717bc3 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -364,8 +364,8 @@ - - + + @@ -533,7 +533,7 @@ - + @@ -560,8 +560,8 @@ - - + + @@ -571,15 +571,16 @@ + - - + + - + @@ -589,12 +590,11 @@ - - + @@ -626,8 +626,8 @@ - - + + @@ -639,7 +639,7 @@ - + diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index de01396c8c70b..aa3b5c9d628c9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -206,7 +206,8 @@ public class CommonClientConfigs { + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, " + "then the broker will remove this client from the group and initiate a rebalance. Note that the value " + "must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms " - + "and group.max.session.timeout.ms."; + + "and group.max.session.timeout.ms. Note that this configuration is not supported when group.protocol " + + "is set to \"consumer\"."; public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms"; public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer " diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java index 2eac1f055f6e0..a6ef6f7f0f34d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterOptions.java @@ -29,6 +29,8 @@ public class DescribeClusterOptions extends AbstractOptions duration() { + return duration; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java index c1e367055e407..4664267a0e858 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java @@ -139,22 +139,34 @@ public void handleCoordinatorDisconnect(Throwable exception, long currentTimeMs) } /** - * Mark the current coordinator null. + * Mark the coordinator as "unknown" (i.e. {@code null}) when a disconnect is detected. This detection can occur + * in one of two paths: * - * @param cause why the coordinator is marked unknown. - * @param currentTimeMs the current time in ms. + *
+     * <ol>
+     *     <li>The coordinator was discovered, but then later disconnected</li>
+     *     <li>The coordinator has not yet been discovered and/or connected</li>
+     * </ol>
+ * + * @param cause String explanation of why the coordinator is marked unknown + * @param currentTimeMs Current time in milliseconds */ public void markCoordinatorUnknown(final String cause, final long currentTimeMs) { - if (this.coordinator != null) { - log.info("Group coordinator {} is unavailable or invalid due to cause: {}. " - + "Rediscovery will be attempted.", this.coordinator, cause); - this.coordinator = null; + if (coordinator != null || timeMarkedUnknownMs == -1) { timeMarkedUnknownMs = currentTimeMs; totalDisconnectedMin = 0; + } + + if (coordinator != null) { + log.info( + "Group coordinator {} is unavailable or invalid due to cause: {}. Rediscovery will be attempted.", + coordinator, + cause + ); + coordinator = null; } else { long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs); long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS; - if (currDisconnectMin > this.totalDisconnectedMin) { + if (currDisconnectMin > totalDisconnectedMin) { log.debug("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs); totalDisconnectedMin = currDisconnectMin; } diff --git a/clients/src/main/java/org/apache/kafka/common/Node.java b/clients/src/main/java/org/apache/kafka/common/Node.java index 020d2bcaf3355..e47d941e0f902 100644 --- a/clients/src/main/java/org/apache/kafka/common/Node.java +++ b/clients/src/main/java/org/apache/kafka/common/Node.java @@ -30,12 +30,13 @@ public class Node { private final String host; private final int port; private final String rack; + private final boolean isFenced; // Cache hashCode as it is called in performance sensitive parts of the code (e.g. RecordAccumulator.ready) private Integer hash; public Node(int id, String host, int port) { - this(id, host, port, null); + this(id, host, port, null, false); } public Node(int id, String host, int port, String rack) { @@ -44,6 +45,16 @@ public Node(int id, String host, int port, String rack) { this.host = host; this.port = port; this.rack = rack; + this.isFenced = false; + } + + public Node(int id, String host, int port, String rack, boolean isFenced) { + this.id = id; + this.idString = Integer.toString(id); + this.host = host; + this.port = port; + this.rack = rack; + this.isFenced = isFenced; } public static Node noNode() { @@ -102,6 +113,13 @@ public String rack() { return rack; } + /** + * Whether if this node is fenced + */ + public boolean isFenced() { + return isFenced; + } + @Override public int hashCode() { Integer h = this.hash; @@ -110,6 +128,7 @@ public int hashCode() { result = 31 * result + id; result = 31 * result + port; result = 31 * result + ((rack == null) ? 
0 : rack.hashCode()); + result = 31 * result + Objects.hashCode(isFenced); this.hash = result; return result; } else { @@ -127,12 +146,13 @@ public boolean equals(Object obj) { return id == other.id && port == other.port && Objects.equals(host, other.host) && - Objects.equals(rack, other.rack); + Objects.equals(rack, other.rack) && + Objects.equals(isFenced, other.isFenced); } @Override public String toString() { - return host + ":" + port + " (id: " + idString + " rack: " + rack + ")"; + return host + ":" + port + " (id: " + idString + " rack: " + rack + " isFenced: " + isFenced + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java index 4964a8a8a9d17..7c892874214e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java @@ -39,7 +39,7 @@ public DescribeClusterResponse(DescribeClusterResponseData data) { public Map nodes() { return data.brokers().valuesList().stream() - .map(b -> new Node(b.brokerId(), b.host(), b.port(), b.rack())) + .map(b -> new Node(b.brokerId(), b.host(), b.port(), b.rack(), b.isFenced())) .collect(Collectors.toMap(Node::id, Function.identity())); } diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java index 91df6b8aac513..705aafaaa70db 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java @@ -342,6 +342,10 @@ public long timeToNextUpdate(long requestTimeoutMs) { timeMs = Long.MAX_VALUE; log.trace("For telemetry state {}, returning the value {} ms; the terminating push is in progress, disabling telemetry for further requests", localState, timeMs); break; + case TERMINATED: + timeMs = Long.MAX_VALUE; + log.trace("For telemetry state {}, returning the value {} ms; telemetry is terminated, no further requests will be made", localState, timeMs); + break; case TERMINATING_PUSH_NEEDED: timeMs = 0; log.trace("For telemetry state {}, returning the value {} ms; the client should try to submit the final {} network API request ASAP before closing", localState, timeMs, ApiKeys.PUSH_TELEMETRY.name); diff --git a/clients/src/main/resources/common/message/DescribeClusterRequest.json b/clients/src/main/resources/common/message/DescribeClusterRequest.json index 34ebe013bb1a0..71e00df09b2f9 100644 --- a/clients/src/main/resources/common/message/DescribeClusterRequest.json +++ b/clients/src/main/resources/common/message/DescribeClusterRequest.json @@ -20,13 +20,16 @@ "name": "DescribeClusterRequest", // // Version 1 adds EndpointType for KIP-919 support. + // Version 2 adds IncludeFencedBrokers for KIP-1073 support. // - "validVersions": "0-1", + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "IncludeClusterAuthorizedOperations", "type": "bool", "versions": "0+", "about": "Whether to include cluster authorized operations." }, { "name": "EndpointType", "type": "int8", "versions": "1+", "default": "1", - "about": "The endpoint type to describe. 1=brokers, 2=controllers." } + "about": "The endpoint type to describe. 1=brokers, 2=controllers." 
}, + { "name": "IncludeFencedBrokers", "type": "bool", "versions": "2+", + "about": "Whether to include fenced brokers when listing brokers." } ] } diff --git a/clients/src/main/resources/common/message/DescribeClusterResponse.json b/clients/src/main/resources/common/message/DescribeClusterResponse.json index cd30dcfe18c35..a17e427c8c3e2 100644 --- a/clients/src/main/resources/common/message/DescribeClusterResponse.json +++ b/clients/src/main/resources/common/message/DescribeClusterResponse.json @@ -20,8 +20,9 @@ // // Version 1 adds the EndpointType field, and makes MISMATCHED_ENDPOINT_TYPE and // UNSUPPORTED_ENDPOINT_TYPE valid top-level response error codes. + // Version 2 adds IsFenced field to Brokers for KIP-1073 support. // - "validVersions": "0-1", + "validVersions": "0-2", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", @@ -45,7 +46,9 @@ { "name": "Port", "type": "int32", "versions": "0+", "about": "The broker port." }, { "name": "Rack", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", - "about": "The rack of the broker, or null if it has not been assigned to a rack." } + "about": "The rack of the broker, or null if it has not been assigned to a rack." }, + { "name": "IsFenced", "type": "bool", "versions": "2+", + "about": "Whether the broker is fenced" } ]}, { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "0+", "default": "-2147483648", "about": "32-bit bitfield to represent authorized operations for this cluster." } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index 44f6e1f5a8891..1d1ae3e884b49 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -3166,6 +3166,23 @@ public void testListGroupsWithTypesOlderBrokerVersion() { } } + @Test + public void testDescribeClusterHandleUnsupportedVersionForIncludingFencedBrokers() { + ApiVersion describeClusterV1 = new ApiVersion() + .setApiKey(ApiKeys.DESCRIBE_CLUSTER.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 1); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(describeClusterV1))); + + env.kafkaClient().prepareUnsupportedVersionResponse( + request -> request instanceof DescribeClusterRequest); + + final DescribeClusterResult result = env.adminClient().describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)); + TestUtils.assertFutureThrows(result.nodes(), UnsupportedVersionException.class); + } + } + @Test public void testListConsumerGroups() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java index fa2a1f19bddaa..b5ab39e62c720 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java @@ -41,6 +41,7 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.Disabled; 
import org.junit.jupiter.api.Test; import java.time.Duration; @@ -265,6 +266,7 @@ public void testMetadataFailurePropagated() { assertEquals(metadataException, exc); } + @Disabled("KAFKA-17554") @Test public void testFutureCompletionOutsidePoll() throws Exception { // Tests the scenario in which the request that is being awaited in one thread diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java index c003574a23f82..7e805dc3cd3b6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java @@ -28,16 +28,24 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.RequestHeader; +import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Collections; +import java.util.List; +import java.util.Objects; import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -75,6 +83,78 @@ public void testSuccessfulResponse() { assertEquals(Collections.emptyList(), pollResult.unsentRequests); } + /** + * This test mimics a client that has been disconnected from the coordinator. When the client remains disconnected + * from the coordinator for 60 seconds, the client will begin to emit a warning log every minute thereafter to + * alert the user about the ongoing disconnect status. The warning log includes the length of time of the ongoing + * disconnect: + * + * + * Consumer has been disconnected from the group coordinator for XXXXXms + * + * + *

+ * + * However, the logic used to calculate the length of the disconnect was not correct. This test exercises the + * disconnect logic, controlling the logging and system time, to ensure the warning message is correct. + * + * @see CoordinatorRequestManager#markCoordinatorUnknown(String, long) + */ + @Test + public void testMarkCoordinatorUnknownLoggingAccuracy() { + long oneMinute = 60000; + + try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + // You'd be forgiven for assuming that a warning message would be logged at WARN, but + // markCoordinatorUnknown logs the warning at DEBUG. This is partly for historical parity with the + // ClassicKafkaConsumer. + appender.setClassLogger(CoordinatorRequestManager.class, Level.DEBUG); + CoordinatorRequestManager coordinatorRequestManager = setupCoordinatorManager(GROUP_ID); + assertFalse(coordinatorRequestManager.coordinator().isPresent()); + + // Step 1: mark the coordinator as disconnected right after creation of the CoordinatorRequestManager. + // Because the disconnect occurred immediately, no warning should be logged. + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + assertTrue(millisecondsFromLog(appender).isEmpty()); + + // Step 2: sleep for one minute and mark the coordinator unknown again. Then verify that the warning was + // logged and the reported time is accurate. + time.sleep(oneMinute); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + Optional firstLogMs = millisecondsFromLog(appender); + assertTrue(firstLogMs.isPresent()); + assertEquals(oneMinute, firstLogMs.get()); + + // Step 3: sleep for *another* minute, mark the coordinator unknown again, and verify the accuracy. + time.sleep(oneMinute); + coordinatorRequestManager.markCoordinatorUnknown("test", time.milliseconds()); + Optional secondLogMs = millisecondsFromLog(appender); + assertTrue(secondLogMs.isPresent()); + assertEquals(oneMinute * 2, secondLogMs.get()); + } + } + + private Optional millisecondsFromLog(LogCaptureAppender appender) { + Pattern pattern = Pattern.compile("\\s+(?\\d+)+ms"); + List milliseconds = appender.getMessages().stream() + .map(pattern::matcher) + .filter(Matcher::find) + .map(matcher -> matcher.group("millis")) + .filter(Objects::nonNull) + .map(millisString -> { + try { + return Long.parseLong(millisString); + } catch (NumberFormatException e) { + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + + // Return the most recent log entry that matches the message in markCoordinatorUnknown, if present. + return milliseconds.isEmpty() ? 
Optional.empty() : Optional.of(milliseconds.get(milliseconds.size() - 1)); + } + @Test public void testMarkCoordinatorUnknown() { CoordinatorRequestManager coordinatorManager = setupCoordinatorManager(GROUP_ID); diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java index 066e9ff74de48..b708b4eeb602d 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java @@ -63,7 +63,6 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -238,7 +237,7 @@ public void testTelemetrySenderTimeToNextUpdate() { assertEquals(Long.MAX_VALUE, telemetrySender.timeToNextUpdate(100)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.TERMINATED)); - assertThrows(IllegalStateException.class, () -> telemetrySender.timeToNextUpdate(100)); + assertEquals(Long.MAX_VALUE, telemetrySender.timeToNextUpdate(100)); } @Test diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java index 73f87dd04ee05..ca960414dd5b8 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java @@ -51,7 +51,7 @@ public interface ConnectRestExtension extends Configurable, Versioned, Closeable * will invoke this method after registering the default Connect resources. If the implementations attempt * to re-register any of the Connect resources, it will be ignored and will be logged. * - * @param restPluginContext The context provides access to JAX-RS {@link javax.ws.rs.core.Configurable} and {@link + * @param restPluginContext The context provides access to JAX-RS {@link jakarta.ws.rs.core.Configurable} and {@link * ConnectClusterState}.The custom JAX-RS resources can be registered via the {@link * ConnectRestExtensionContext#configurable()} */ diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java index 5e357be8c9142..0bfcee678b1aa 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtensionContext.java @@ -19,19 +19,20 @@ import org.apache.kafka.connect.health.ConnectClusterState; -import javax.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configurable; + /** * The interface provides the ability for {@link ConnectRestExtension} implementations to access the JAX-RS - * {@link javax.ws.rs.core.Configurable} and cluster state {@link ConnectClusterState}. The implementation for the interface is provided + * {@link jakarta.ws.rs.core.Configurable} and cluster state {@link ConnectClusterState}. The implementation for the interface is provided * by the Connect framework. 
*/ public interface ConnectRestExtensionContext { /** - * Provides an implementation of {@link javax.ws.rs.core.Configurable} that can be used to register JAX-RS resources. + * Provides an implementation of {@link jakarta.ws.rs.core.Configurable} that can be used to register JAX-RS resources. * - * @return the JAX-RS {@link javax.ws.rs.core.Configurable}; never {@code null} + * @return the JAX-RS {@link jakarta.ws.rs.core.Configurable}; never {@code null} */ Configurable> configurable(); diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java index 58aac7994aefc..a72f85d068888 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtension.java @@ -51,7 +51,7 @@ * * *

This is a reference implementation of the {@link ConnectRestExtension} interface. It registers an implementation of {@link - * javax.ws.rs.container.ContainerRequestFilter} that does JAAS based authentication of incoming Basic Auth credentials. {@link + * jakarta.ws.rs.container.ContainerRequestFilter} that does JAAS based authentication of incoming Basic Auth credentials. {@link * ConnectRestExtension} implementations are loaded via the plugin class loader using {@link java.util.ServiceLoader} mechanism and hence * the packaged jar includes {@code META-INF/services/org.apache.kafka.connect.rest.extension.ConnectRestExtension} with the entry * {@code org.apache.kafka.connect.extension.auth.jaas.BasicAuthSecurityRestExtension} diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java index b090ee21d449d..d404bdc7dc19a 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java @@ -42,12 +42,13 @@ import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.Priorities; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.container.ContainerRequestFilter; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.SecurityContext; + +import jakarta.ws.rs.HttpMethod; +import jakarta.ws.rs.Priorities; +import jakarta.ws.rs.container.ContainerRequestContext; +import jakarta.ws.rs.container.ContainerRequestFilter; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.SecurityContext; @Priority(Priorities.AUTHENTICATION) public class JaasBasicAuthFilter implements ContainerRequestFilter { diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java index 146bd6a2adf63..81f3a7327d576 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java @@ -31,7 +31,8 @@ import java.util.function.Supplier; import javax.security.auth.login.Configuration; -import javax.ws.rs.core.Configurable; + +import jakarta.ws.rs.core.Configurable; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java index 24ecadcc0a09b..bcd6e0ab31995 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java @@ -39,11 
+39,12 @@ import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.ChoiceCallback; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.SecurityContext; -import javax.ws.rs.core.UriInfo; + +import jakarta.ws.rs.HttpMethod; +import jakarta.ws.rs.container.ContainerRequestContext; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.SecurityContext; +import jakarta.ws.rs.core.UriInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java index 0af3b14e3b8e9..218c64e85a478 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java @@ -132,6 +132,12 @@ public Class taskClass() { // divide consumer groups among tasks @Override public List> taskConfigs(int maxTasks) { + // If the replication is disabled or checkpoint emission is disabled by setting 'emit.checkpoints.enabled' to false, + // the interval of checkpoint emission will be negative and no 'MirrorCheckpointTask' will be created. + if (!config.enabled() || config.emitCheckpointsInterval().isNegative()) { + return Collections.emptyList(); + } + if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. @@ -139,13 +145,11 @@ public List> taskConfigs(int maxTasks) { throw new RetriableException("Timeout while loading consumer groups."); } - // if the replication is disabled, known consumer group is empty, or checkpoint emission is - // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission - // will be negative and no 'MirrorCheckpointTask' will be created - if (!config.enabled() || knownConsumerGroups.isEmpty() - || config.emitCheckpointsInterval().isNegative()) { + // If the consumer group is empty, no 'MirrorCheckpointTask' will be created. 
+ if (knownConsumerGroups.isEmpty()) { return Collections.emptyList(); } + int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java index 06480bcf4a5a4..4c02160a194f5 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/resources/InternalMirrorResource.java @@ -24,11 +24,11 @@ import java.util.Map; -import javax.inject.Inject; -import javax.ws.rs.NotFoundException; -import javax.ws.rs.Path; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.UriInfo; +import jakarta.inject.Inject; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.UriInfo; @Path("/{source}/{target}/connectors") public class InternalMirrorResource extends InternalClusterResource { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java index d20484e788525..2ba4438bdba9b 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java @@ -280,7 +280,13 @@ public void testMultiNodeCluster() throws Exception { // Cluster aliases final String a = "A"; // Use a convoluted cluster name to ensure URL encoding/decoding works - final String b = "B- ._~:/?#[]@!$&'()*+;=\"<>%{}|\\^`618"; + // The servlet 6.0 spec no longer allows some characters such as forward slashes, control characters, + // etc. even if they are encoded. Jetty 12 will enforce this and throw a 400 ambiguous error + // so the string of characters for the variable "b" has been updated to only include characters + // that are valid with the new spec. + // See https://jakarta.ee/specifications/servlet/6.0/jakarta-servlet-spec-6.0#uri-path-canonicalization + // and specifically the section: "10. Rejecting Suspicious Sequences." for details. 
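The comment above explains why the cluster alias in the constant that follows was trimmed: the characters dropped from the old value (' ', '.', '/', ';', '%', '\') are the ones the servlet 6.0 canonicalization rules treat as suspicious in a request path even when percent-encoded, and Jetty 12 now rejects such paths with a 400. As a rough standalone illustration only, the sketch below percent-encodes both the old and the new alias as they would appear in a MirrorMaker REST path such as /{source}/{target}/connectors; the class and variable names here are invented for the sketch and are not part of the change.

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    // Illustration: how the old and new cluster aliases look once percent-encoded into a
    // dedicated-mode MirrorMaker REST path. Only standard JDK classes are used.
    public class AliasEncodingSketch {
        public static void main(String[] args) {
            // Previous value; note the ' ', '.', '/', ';', '%', '\' characters that were removed.
            String oldAlias = "B- ._~:/?#[]@!$&'()*+;=\"<>%{}|\\^`618";
            // Updated value from this change, restricted to characters that survive the new rules.
            String newAlias = "B-_~:?#[]@!$&'()*+=\"<>{}|^`618";
            System.out.println("/" + URLEncoder.encode(oldAlias, StandardCharsets.UTF_8) + "/A/connectors");
            System.out.println("/" + URLEncoder.encode(newAlias, StandardCharsets.UTF_8) + "/A/connectors");
        }
    }
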
+ final String b = "B-_~:?#[]@!$&'()*+=\"<>{}|^`618"; final String ab = a + "->" + b; final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index 046dcc63e5a20..c4f53ad413711 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -34,7 +34,9 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.NotFoundException; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; @@ -64,6 +66,8 @@ import org.apache.kafka.connect.util.TemporaryStage; import org.apache.logging.log4j.Level; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,8 +89,6 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.function.Function; @@ -94,9 +96,15 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_VERSION; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG; + /** * Abstract Herder implementation which handles connector/task lifecycle tracking. Extensions @@ -138,7 +146,7 @@ public abstract class AbstractHerder implements Herder, TaskStatus.Listener, Con private final Time time; protected final Loggers loggers; - private final ConcurrentMap tempConnectors = new ConcurrentHashMap<>(); + private final CachedConnectors cachedConnectors; public AbstractHerder(Worker worker, String workerId, @@ -157,6 +165,7 @@ public AbstractHerder(Worker worker, this.connectorExecutor = Executors.newCachedThreadPool(); this.time = time; this.loggers = new Loggers(time); + this.cachedConnectors = new CachedConnectors(worker.getPlugins()); } @Override @@ -398,6 +407,8 @@ protected Map validateSourceConnectorConfig(SourceConnector * may be null, in which case no validation will be performed under the assumption that the * connector will use inherit the converter settings from the worker. 
Some errors encountered * during validation may be {@link ConfigValue#addErrorMessage(String) added} to this object + * @param pluginVersionValue the {@link ConfigValue} for the converter version property in the connector config; + * * @param pluginInterface the interface for the plugin type * (e.g., {@code org.apache.kafka.connect.storage.Converter.class}); * may not be null @@ -418,14 +429,18 @@ protected Map validateSourceConnectorConfig(SourceConnector * @param the plugin class to perform validation for */ + @SuppressWarnings("unchecked") private ConfigInfos validateConverterConfig( Map connectorConfig, ConfigValue pluginConfigValue, + ConfigValue pluginVersionValue, Class pluginInterface, Function configDefAccessor, String pluginName, String pluginProperty, + String pluginVersionProperty, Map defaultProperties, + ClassLoader connectorLoader, Function reportStage ) { Objects.requireNonNull(connectorConfig); @@ -433,12 +448,15 @@ private ConfigInfos validateConverterConfig( Objects.requireNonNull(configDefAccessor); Objects.requireNonNull(pluginName); Objects.requireNonNull(pluginProperty); + Objects.requireNonNull(pluginVersionProperty); String pluginClass = connectorConfig.get(pluginProperty); + String pluginVersion = connectorConfig.get(pluginVersionProperty); if (pluginClass == null || pluginConfigValue == null || !pluginConfigValue.errorMessages().isEmpty() + || !pluginVersionValue.errorMessages().isEmpty() ) { // Either no custom converter was specified, or one was specified but there's a problem with it. // No need to proceed any further. @@ -448,11 +466,22 @@ private ConfigInfos validateConverterConfig( T pluginInstance; String stageDescription = "instantiating the connector's " + pluginName + " for validation"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { - pluginInstance = Utils.newInstance(pluginClass, pluginInterface); + VersionRange range = PluginUtils.connectorVersionRequirement(pluginVersion); + pluginInstance = (T) plugins().newPlugin(pluginClass, range, connectorLoader); + } catch (VersionedPluginLoadingException e) { + log.error("Failed to load {} class {} with version {}: {}", pluginName, pluginClass, pluginVersion, e); + pluginConfigValue.addErrorMessage(e.getMessage()); + pluginVersionValue.addErrorMessage(e.getMessage()); + return null; } catch (ClassNotFoundException | RuntimeException e) { log.error("Failed to instantiate {} class {}; this should have been caught by prior validation logic", pluginName, pluginClass, e); pluginConfigValue.addErrorMessage("Failed to load class " + pluginClass + (e.getMessage() != null ? 
": " + e.getMessage() : "")); return null; + } catch (InvalidVersionSpecificationException e) { + // this should have been caught by prior validation logic + log.error("Invalid version range for {} class {}: {}", pluginName, pluginClass, pluginVersion, e); + pluginVersionValue.addErrorMessage(e.getMessage()); + return null; } try { @@ -494,55 +523,55 @@ private ConfigInfos validateConverterConfig( } } - private ConfigInfos validateHeaderConverterConfig( - Map connectorConfig, - ConfigValue headerConverterConfigValue, + private ConfigInfos validateAllConverterConfigs( + Map connectorProps, + Map validatedConnectorConfig, + ClassLoader connectorLoader, Function reportStage ) { - return validateConverterConfig( - connectorConfig, - headerConverterConfigValue, + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + // do custom converter-specific validation + ConfigInfos headerConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(HEADER_CONVERTER_VERSION_CONFIG), HeaderConverter.class, HeaderConverter::config, "header converter", HEADER_CONVERTER_CLASS_CONFIG, + HEADER_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), + connectorLoader, reportStage ); - } - - private ConfigInfos validateKeyConverterConfig( - Map connectorConfig, - ConfigValue keyConverterConfigValue, - Function reportStage - ) { - return validateConverterConfig( - connectorConfig, - keyConverterConfigValue, + ConfigInfos keyConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(KEY_CONVERTER_VERSION_CONFIG), Converter.class, Converter::config, "key converter", KEY_CONVERTER_CLASS_CONFIG, + KEY_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), + connectorLoader, reportStage ); - } - private ConfigInfos validateValueConverterConfig( - Map connectorConfig, - ConfigValue valueConverterConfigValue, - Function reportStage - ) { - return validateConverterConfig( - connectorConfig, - valueConverterConfigValue, + ConfigInfos valueConverterConfigInfos = validateConverterConfig( + connectorProps, + validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), + validatedConnectorConfig.get(VALUE_CONVERTER_VERSION_CONFIG), Converter.class, Converter::config, "value converter", VALUE_CONVERTER_CLASS_CONFIG, + VALUE_CONVERTER_VERSION_CONFIG, Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), + connectorLoader, reportStage ); + return mergeConfigInfos(connType, headerConverterConfigInfos, keyConverterConfigInfos, valueConverterConfigInfos); } @Override @@ -634,6 +663,146 @@ protected boolean connectorUsesProducer(org.apache.kafka.connect.health.Connecto || SinkConnectorConfig.hasDlqTopicConfig(connProps); } + private ConfigInfos validateClientOverrides( + Map connectorProps, + org.apache.kafka.connect.health.ConnectorType connectorType, + Class connectorClass, + Function reportStage, + boolean doLog + ) { + if (connectorClass == null || connectorType == null) { + return null; + } + AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); + String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + ConfigInfos producerConfigInfos = null; + ConfigInfos 
consumerConfigInfos = null; + ConfigInfos adminConfigInfos = null; + String stageDescription = null; + + if (connectorUsesProducer(connectorType, connectorProps)) { + stageDescription = "validating producer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + producerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, + connectorConfig, + ProducerConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.PRODUCER, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesAdmin(connectorType, connectorProps)) { + stageDescription = "validating admin config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + adminConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, + connectorConfig, + AdminClientConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.ADMIN, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesConsumer(connectorType, connectorProps)) { + stageDescription = "validating consumer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + consumerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, + connectorConfig, + ConsumerConfig.configDef(), + connectorClass, + connectorType, + ConnectorClientConfigRequest.ClientType.CONSUMER, + connectorClientConfigOverridePolicy); + } + } + return mergeConfigInfos(connType, + producerConfigInfos, + consumerConfigInfos, + adminConfigInfos + ); + } + + private ConfigInfos validateConnectorPluginSpecifiedConfigs( + Map connectorProps, + Map validatedConnectorConfig, + ConfigDef enrichedConfigDef, + Connector connector, + Function reportStage + ) { + List configValues = new ArrayList<>(validatedConnectorConfig.values()); + Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); + Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); + + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + // do custom connector-specific validation + ConfigDef configDef; + String stageDescription = "retrieving the configuration definition from the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + configDef = connector.config(); + } + if (null == configDef) { + throw new BadRequestException( + String.format( + "%s.config() must return a ConfigDef that is not null.", + connector.getClass().getName() + ) + ); + } + + Config config; + stageDescription = "performing multi-property validation for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + config = connector.validate(connectorProps); + } + if (null == config) { + throw new BadRequestException( + String.format( + "%s.validate() must return a Config that is not null.", + connector.getClass().getName() + ) + ); + } + configKeys.putAll(configDef.configKeys()); + allGroups.addAll(configDef.groups()); + configValues.addAll(config.configValues()); + return generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); + } + + private void addNullValuedErrors(Map connectorProps, Map validatedConfig) { + connectorProps.entrySet().stream() + .filter(e -> e.getValue() == null) + .map(Map.Entry::getKey) + .forEach(prop -> + validatedConfig.computeIfAbsent(prop, 
ConfigValue::new) + .addErrorMessage("Null value can not be supplied as the configuration value.")); + } + + private ConfigInfos invalidVersionedConnectorValidation( + Map connectorProps, + VersionedPluginLoadingException e, + Function reportStage + ) { + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + ConfigDef configDef = ConnectorConfig.enrichedConfigDef(worker.getPlugins(), connType); + Map validatedConfig; + try (TemporaryStage stage = reportStage.apply("validating connector configuration")) { + validatedConfig = configDef.validateAll(connectorProps); + } + validatedConfig.get(CONNECTOR_CLASS_CONFIG).addErrorMessage(e.getMessage()); + validatedConfig.get(CONNECTOR_VERSION).addErrorMessage(e.getMessage()); + validatedConfig.get(CONNECTOR_VERSION).recommendedValues(e.availableVersions().stream().map(v -> (Object) v).collect(Collectors.toList())); + addNullValuedErrors(connectorProps, validatedConfig); + return generateResult(connType, configDef.configKeys(), new ArrayList<>(validatedConfig.values()), new ArrayList<>(configDef.groups())); + } + ConfigInfos validateConnectorConfig( Map connectorProps, Function reportStage, @@ -646,150 +815,58 @@ ConfigInfos validateConnectorConfig( connectorProps = worker.configTransformer().transform(connectorProps); } } - String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - if (connType == null) + String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); + if (connType == null) { throw new BadRequestException("Connector config " + connectorProps + " contains no connector type"); + } + + VersionRange connVersion; + Connector connector; + ClassLoader connectorLoader; + try { + connVersion = PluginUtils.connectorVersionRequirement(connectorProps.get(CONNECTOR_VERSION)); + connector = cachedConnectors.getConnector(connType, connVersion); + connectorLoader = plugins().pluginLoader(connType, connVersion); + log.info("Validating connector {}, version {}", connType, connector.version()); + } catch (VersionedPluginLoadingException e) { + log.warn("Failed to load connector {} with version {}, skipping additional validations (connector, converters, transformations, client overrides) ", + connType, connectorProps.get(CONNECTOR_VERSION), e); + return invalidVersionedConnectorValidation(connectorProps, e, reportStage); + } catch (Exception e) { + throw new BadRequestException(e.getMessage(), e); + } - Connector connector = getConnector(connType); - ClassLoader connectorLoader = plugins().connectorLoader(connType); try (LoaderSwap loaderSwap = plugins().withClassLoader(connectorLoader)) { - org.apache.kafka.connect.health.ConnectorType connectorType; + ConfigDef enrichedConfigDef; Map validatedConnectorConfig; + org.apache.kafka.connect.health.ConnectorType connectorType; if (connector instanceof SourceConnector) { connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE; - enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.configDef(), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); stageDescription = "validating source connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSourceConnectorConfig((SourceConnector) connector, enrichedConfigDef, connectorProps); } } else { connectorType = org.apache.kafka.connect.health.ConnectorType.SINK; - 
enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.configDef(), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); stageDescription = "validating sink connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSinkConnectorConfig((SinkConnector) connector, enrichedConfigDef, connectorProps); } } - connectorProps.entrySet().stream() - .filter(e -> e.getValue() == null) - .map(Map.Entry::getKey) - .forEach(prop -> - validatedConnectorConfig.computeIfAbsent(prop, ConfigValue::new) - .addErrorMessage("Null value can not be supplied as the configuration value.") - ); - - List configValues = new ArrayList<>(validatedConnectorConfig.values()); - Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); - Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); + addNullValuedErrors(connectorProps, validatedConnectorConfig); - // do custom connector-specific validation - ConfigDef configDef; - stageDescription = "retrieving the configuration definition from the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - configDef = connector.config(); - } - if (null == configDef) { - throw new BadRequestException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - connector.getClass().getName() - ) - ); - } + ConfigInfos connectorConfigInfo = validateConnectorPluginSpecifiedConfigs(connectorProps, validatedConnectorConfig, enrichedConfigDef, connector, reportStage); + ConfigInfos converterConfigInfo = validateAllConverterConfigs(connectorProps, validatedConnectorConfig, connectorLoader, reportStage); + ConfigInfos clientOverrideInfo = validateClientOverrides(connectorProps, connectorType, connector.getClass(), reportStage, doLog); - Config config; - stageDescription = "performing multi-property validation for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - config = connector.validate(connectorProps); - } - if (null == config) { - throw new BadRequestException( - String.format( - "%s.validate() must return a Config that is not null.", - connector.getClass().getName() - ) - ); - } - configKeys.putAll(configDef.configKeys()); - allGroups.addAll(configDef.groups()); - configValues.addAll(config.configValues()); - - // do custom converter-specific validation - ConfigInfos headerConverterConfigInfos = validateHeaderConverterConfig( - connectorProps, - validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), - reportStage - ); - ConfigInfos keyConverterConfigInfos = validateKeyConverterConfig( - connectorProps, - validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), - reportStage - ); - ConfigInfos valueConverterConfigInfos = validateValueConverterConfig( - connectorProps, - validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), - reportStage - ); - - ConfigInfos configInfos = generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); - AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); - String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); - ConfigInfos producerConfigInfos = null; - ConfigInfos consumerConfigInfos = null; - ConfigInfos adminConfigInfos = null; - - if (connectorUsesProducer(connectorType, connectorProps)) { - stageDescription = "validating 
producer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - producerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, - connectorConfig, - ProducerConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.PRODUCER, - connectorClientConfigOverridePolicy); - } - } - if (connectorUsesAdmin(connectorType, connectorProps)) { - stageDescription = "validating admin config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - adminConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, - connectorConfig, - AdminClientConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.ADMIN, - connectorClientConfigOverridePolicy); - } - } - if (connectorUsesConsumer(connectorType, connectorProps)) { - stageDescription = "validating consumer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - consumerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, - connectorConfig, - ConsumerConfig.configDef(), - connector.getClass(), - connectorType, - ConnectorClientConfigRequest.ClientType.CONSUMER, - connectorClientConfigOverridePolicy); - } - } return mergeConfigInfos(connType, - configInfos, - producerConfigInfos, - consumerConfigInfos, - adminConfigInfos, - headerConverterConfigInfos, - keyConverterConfigInfos, - valueConverterConfigInfos + connectorConfigInfo, + clientOverrideInfo, + converterConfigInfo ); } } @@ -936,10 +1013,6 @@ private static ConfigValueInfo convertConfigValue(ConfigValue configValue, Type return new ConfigValueInfo(configValue.name(), value, recommendedValues, configValue.errorMessages(), configValue.visible()); } - protected Connector getConnector(String connType) { - return tempConnectors.computeIfAbsent(connType, k -> plugins().newConnector(k)); - } - /** * Retrieves ConnectorType for the class specified in the connector config * @param connConfig the connector config, may be null @@ -950,13 +1023,14 @@ public ConnectorType connectorType(Map connConfig) { if (connConfig == null) { return ConnectorType.UNKNOWN; } - String connClass = connConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + String connClass = connConfig.get(CONNECTOR_CLASS_CONFIG); if (connClass == null) { return ConnectorType.UNKNOWN; } try { - return ConnectorType.from(getConnector(connClass).getClass()); - } catch (ConnectException e) { + VersionRange range = PluginUtils.connectorVersionRequirement(connConfig.get(CONNECTOR_VERSION)); + return ConnectorType.from(cachedConnectors.getConnector(connClass, range).getClass()); + } catch (Exception e) { log.warn("Unable to retrieve connector type", e); return ConnectorType.UNKNOWN; } @@ -1078,25 +1152,33 @@ static Set keysWithVariableValues(Map rawConfig, Pattern @Override public List connectorPluginConfig(String pluginName) { + return connectorPluginConfig(pluginName, null); + } + + @Override + public List connectorPluginConfig(String pluginName, VersionRange range) { + Plugins p = plugins(); Class pluginClass; try { - pluginClass = p.pluginClass(pluginName); + pluginClass = p.pluginClass(pluginName, range); } catch (ClassNotFoundException cnfe) { throw new NotFoundException("Unknown plugin " + pluginName + "."); + } catch (VersionedPluginLoadingException 
e) { + throw new BadRequestException(e.getMessage(), e); } try (LoaderSwap loaderSwap = p.withClassLoader(pluginClass.getClassLoader())) { - Object plugin = p.newPlugin(pluginName); + Object plugin = p.newPlugin(pluginName, range); // Contains definitions coming from Connect framework ConfigDef baseConfigDefs = null; // Contains definitions specifically declared on the plugin ConfigDef pluginConfigDefs; if (plugin instanceof SinkConnector) { - baseConfigDefs = SinkConnectorConfig.configDef(); + baseConfigDefs = SinkConnectorConfig.enrichedConfigDef(p, pluginName); pluginConfigDefs = ((SinkConnector) plugin).config(); } else if (plugin instanceof SourceConnector) { - baseConfigDefs = SourceConnectorConfig.configDef(); + baseConfigDefs = SourceConnectorConfig.enrichedConfigDef(p, pluginName); pluginConfigDefs = ((SourceConnector) plugin).config(); } else if (plugin instanceof Converter) { pluginConfigDefs = ((Converter) plugin).config(); @@ -1114,8 +1196,9 @@ public List connectorPluginConfig(String pluginName) { // give precedence to the one defined by the plugin class // Preserve the ordering of properties as they're returned from each ConfigDef Map configsMap = new LinkedHashMap<>(pluginConfigDefs.configKeys()); - if (baseConfigDefs != null) + if (baseConfigDefs != null) { baseConfigDefs.configKeys().forEach(configsMap::putIfAbsent); + } List results = new ArrayList<>(); for (ConfigKey configKey : configsMap.values()) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java new file mode 100644 index 0000000000000..59c4281fff885 --- /dev/null +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/CachedConnectors.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.connect.runtime; + +import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; + +import org.apache.maven.artifact.versioning.VersionRange; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class CachedConnectors { + + private static final String LATEST_VERSION = "latest"; + + private final Map> connectors; + private final Map invalidConnectors; + private final Map> invalidVersions; + private final Plugins plugins; + + public CachedConnectors(Plugins plugins) { + this.plugins = plugins; + this.connectors = new ConcurrentHashMap<>(); + this.invalidConnectors = new ConcurrentHashMap<>(); + this.invalidVersions = new ConcurrentHashMap<>(); + } + + private void validate(String connectorName, VersionRange range) throws Exception { + if (invalidConnectors.containsKey(connectorName)) { + throw new Exception(invalidConnectors.get(connectorName)); + } + + String version = range == null ? LATEST_VERSION : range.toString(); + if (invalidVersions.containsKey(connectorName) && invalidVersions.get(connectorName).containsKey(version)) { + throw new Exception(invalidVersions.get(connectorName).get(version)); + } + } + + private Connector lookup(String connectorName, VersionRange range) throws Exception { + String version = range == null ? LATEST_VERSION : range.toString(); + if (connectors.containsKey(connectorName) && connectors.get(connectorName).containsKey(version)) { + return connectors.get(connectorName).get(version); + } + + try { + Connector connector = plugins.newConnector(connectorName, range); + connectors.computeIfAbsent(connectorName, k -> new ConcurrentHashMap<>()).put(version, connector); + return connector; + } catch (VersionedPluginLoadingException e) { + invalidVersions.computeIfAbsent(connectorName, k -> new ConcurrentHashMap<>()).put(version, e); + throw e; + } catch (Exception e) { + invalidConnectors.put(connectorName, e); + throw e; + } + } + + public Connector getConnector(String connectorName, VersionRange range) throws Exception { + validate(connectorName, range); + return lookup(connectorName, range); + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index cb604ad73eef5..929dc57a37709 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -27,7 +27,10 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.runtime.errors.ToleranceType; import org.apache.kafka.connect.runtime.isolation.PluginDesc; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; +import org.apache.kafka.connect.runtime.isolation.PluginsRecommenders; +import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; import org.apache.kafka.connect.transforms.Transformation; @@ -35,6 +38,8 @@ import org.apache.kafka.connect.util.ConcreteSubClassValidator; import org.apache.kafka.connect.util.InstantiableClassValidator; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import 
org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,6 +87,11 @@ public class ConnectorConfig extends AbstractConfig { " or use \"FileStreamSink\" or \"FileStreamSinkConnector\" to make the configuration a bit shorter"; private static final String CONNECTOR_CLASS_DISPLAY = "Connector class"; + public static final String CONNECTOR_VERSION = "connector." + WorkerConfig.PLUGIN_VERSION_SUFFIX; + private static final String CONNECTOR_VERSION_DOC = "Version of the connector."; + private static final String CONNECTOR_VERSION_DISPLAY = "Connector version"; + private static final ConfigDef.Validator CONNECTOR_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String KEY_CONVERTER_CLASS_CONFIG = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG; public static final String KEY_CONVERTER_CLASS_DOC = WorkerConfig.KEY_CONVERTER_CLASS_DOC; public static final String KEY_CONVERTER_CLASS_DISPLAY = "Key converter class"; @@ -90,6 +100,12 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); + public static final String KEY_CONVERTER_VERSION_CONFIG = WorkerConfig.KEY_CONVERTER_VERSION; + private static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; + private static final String KEY_CONVERTER_VERSION_DISPLAY = "Key converter version"; + private static final ConfigDef.Validator KEY_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + + public static final String VALUE_CONVERTER_CLASS_CONFIG = WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG; public static final String VALUE_CONVERTER_CLASS_DOC = WorkerConfig.VALUE_CONVERTER_CLASS_DOC; public static final String VALUE_CONVERTER_CLASS_DISPLAY = "Value converter class"; @@ -98,17 +114,24 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); + public static final String VALUE_CONVERTER_VERSION_CONFIG = WorkerConfig.VALUE_CONVERTER_VERSION; + private static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; + private static final String VALUE_CONVERTER_VERSION_DISPLAY = "Value converter version"; + private static final ConfigDef.Validator VALUE_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String HEADER_CONVERTER_CLASS_CONFIG = WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG; public static final String HEADER_CONVERTER_CLASS_DOC = WorkerConfig.HEADER_CONVERTER_CLASS_DOC; public static final String HEADER_CONVERTER_CLASS_DISPLAY = "Header converter class"; - // The Connector config should not have a default for the header converter, since the absence of a config property means that - // the worker config settings should be used. Thus, we set the default to null here. 
- public static final String HEADER_CONVERTER_CLASS_DEFAULT = null; private static final ConfigDef.Validator HEADER_CONVERTER_CLASS_VALIDATOR = ConfigDef.CompositeValidator.of( ConcreteSubClassValidator.forSuperClass(HeaderConverter.class), new InstantiableClassValidator() ); + public static final String HEADER_CONVERTER_VERSION_CONFIG = WorkerConfig.HEADER_CONVERTER_VERSION; + private static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; + private static final String HEADER_CONVERTER_VERSION_DISPLAY = "Header converter version"; + private static final ConfigDef.Validator HEADER_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); + public static final String TASKS_MAX_CONFIG = "tasks.max"; private static final String TASKS_MAX_DOC = "Maximum number of tasks to use for this connector."; public static final int TASKS_MAX_DEFAULT = 1; @@ -187,7 +210,11 @@ public class ConnectorConfig extends AbstractConfig { public static final String CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX = "admin.override."; public static final String PREDICATES_PREFIX = "predicates."; - private final EnrichedConnectorConfig enrichedConfig; + private static final PluginsRecommenders EMPTY_RECOMMENDER = new PluginsRecommenders(); + private static final ConverterDefaults CONVERTER_DEFAULTS = new ConverterDefaults(null, null); + + private final ConnectorConfig.EnrichedConnectorConfig enrichedConfig; + private static class EnrichedConnectorConfig extends AbstractConfig { EnrichedConnectorConfig(ConfigDef configDef, Map props) { super(configDef, props); @@ -199,17 +226,27 @@ public Object get(String key) { } } - public static ConfigDef configDef() { + protected static ConfigDef configDef( + String defaultConnectorVersion, + ConverterDefaults keyConverterDefaults, + ConverterDefaults valueConverterDefaults, + ConverterDefaults headerConverterDefaults, + PluginsRecommenders recommender + ) { int orderInGroup = 0; int orderInErrorGroup = 0; return new ConfigDef() .define(NAME_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, nonEmptyStringWithoutControlChars(), Importance.HIGH, NAME_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, NAME_DISPLAY) .define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.LONG, CONNECTOR_CLASS_DISPLAY) + .define(CONNECTOR_VERSION, Type.STRING, defaultConnectorVersion, CONNECTOR_VERSION_VALIDATOR, Importance.MEDIUM, CONNECTOR_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONNECTOR_VERSION_DISPLAY, recommender.connectorPluginVersionRecommender()) .define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG), Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASK_MAX_DISPLAY) .define(TASKS_MAX_ENFORCE_CONFIG, Type.BOOLEAN, TASKS_MAX_ENFORCE_DEFAULT, Importance.LOW, TASKS_MAX_ENFORCE_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASKS_MAX_ENFORCE_DISPLAY) - .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY) - .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY) - .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, 
Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY) + .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, keyConverterDefaults.type, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) + .define(KEY_CONVERTER_VERSION_CONFIG, Type.STRING, keyConverterDefaults.version, KEY_CONVERTER_VERSION_VALIDATOR, Importance.LOW, KEY_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_VERSION_DISPLAY, recommender.keyConverterPluginVersionRecommender()) + .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, valueConverterDefaults.type, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) + .define(VALUE_CONVERTER_VERSION_CONFIG, Type.STRING, valueConverterDefaults.version, VALUE_CONVERTER_VERSION_VALIDATOR, Importance.LOW, VALUE_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_VERSION_DISPLAY, recommender.valueConverterPluginVersionRecommender()) + .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, headerConverterDefaults.type, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY, recommender.headerConverterPluginRecommender()) + .define(HEADER_CONVERTER_VERSION_CONFIG, Type.STRING, headerConverterDefaults.version, HEADER_CONVERTER_VERSION_VALIDATOR, Importance.LOW, HEADER_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_VERSION_DISPLAY, recommender.headerConverterPluginVersionRecommender()) .define(TRANSFORMS_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) .define(PREDICATES_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) .define(CONFIG_RELOAD_ACTION_CONFIG, Type.STRING, CONFIG_RELOAD_ACTION_RESTART, @@ -226,6 +263,28 @@ public static ConfigDef configDef() { ERRORS_LOG_ENABLE_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_ENABLE_DISPLAY) .define(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, Type.BOOLEAN, ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT, Importance.MEDIUM, ERRORS_LOG_INCLUDE_MESSAGES_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_INCLUDE_MESSAGES_DISPLAY); + + } + + public static ConfigDef configDef() { + return configDef(null, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); + } + + // ConfigDef with additional defaults and recommenders + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + PluginsRecommenders recommender = new PluginsRecommenders(plugins); + ConverterDefaults keyConverterDefaults = converterDefaults(plugins, KEY_CONVERTER_CLASS_CONFIG, + WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION, connProps, workerConfig, Converter.class); + ConverterDefaults valueConverterDefaults = converterDefaults(plugins, VALUE_CONVERTER_CLASS_CONFIG, + WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION, connProps, workerConfig, Converter.class); + ConverterDefaults headerConverterDefaults = converterDefaults(plugins, HEADER_CONVERTER_CLASS_CONFIG, + 
WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION, connProps, workerConfig, HeaderConverter.class); + return configDef(plugins.latestVersion(connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG)), + keyConverterDefaults, valueConverterDefaults, headerConverterDefaults, recommender); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return configDef(plugins.latestVersion(connectorClass), CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); } private static ConfigDef.CompositeValidator aliasValidator(String kind) { @@ -271,7 +330,7 @@ public long errorMaxDelayInMillis() { public ToleranceType errorToleranceType() { String tolerance = getString(ERRORS_TOLERANCE_CONFIG); - for (ToleranceType type: ToleranceType.values()) { + for (ToleranceType type : ToleranceType.values()) { if (type.name().equalsIgnoreCase(tolerance)) { return type; } @@ -360,17 +419,17 @@ protected ConfigDef initialConfigDef() { @Override protected Stream> configDefsForClass(String typeConfig) { return super.configDefsForClass(typeConfig) - .filter(entry -> { - // The implicit parameters mask any from the transformer with the same name - if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) - || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { - log.warn("Transformer config {} is masked by implicit config of that name", - entry.getKey()); - return false; - } else { - return true; - } - }); + .filter(entry -> { + // The implicit parameters mask any from the transformer with the same name + if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) + || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { + log.warn("Transformer config {} is masked by implicit config of that name", + entry.getKey()); + return false; + } else { + return true; + } + }); } @Override @@ -405,6 +464,87 @@ protected ConfigDef config(Predicate predicate) { return newDef; } + private static ConverterDefaults converterDefaults( + Plugins plugins, + String connectorConverterConfig, + String workerConverterConfig, + String workerConverterVersionConfig, + Map connectorProps, + WorkerConfig workerConfig, + Class converterType + ) { + /* + if a converter is specified in the connector config it overrides the worker config for the corresponding converter + otherwise the worker config is used, hence if the converter is not provided in the connector config, the default + is the one provided in the worker config + + for converters which version is used depends on a several factors with multi-versioning support + A. If the converter class is provided as part of the connector properties + 1. if the version is not provided, + - if the converter is packaged with the connector then, the packaged version is used + - if the converter is not packaged with the connector, the latest version is used + 2. if the version is provided, the provided version is used + B. If the converter class is not provided as part of the connector properties, but provided as part of the worker properties + 1. if the version is not provided, the latest version is used + 2. if the version is provided, the provided version is used + C. If the converter class is not provided as part of the connector properties and not provided as part of the worker properties, + the converter to use is unknown hence no default version can be determined (null) + + Note: Connect when using service loading has an issue outlined in KAFKA-18119. 
The issue means that the above + logic does not hold currently for clusters using service loading when converters are defined in the connector. + However, the logic to determine the default should ideally follow the one outlined above, and the code here + should still show the correct default version regardless of the bug. + */ + final String connectorConverter = connectorProps.get(connectorConverterConfig); + // since header converter defines a default in the worker config we need to handle it separately + final String workerConverter = workerConverterConfig.equals(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG) ? + workerConfig.getClass(workerConverterConfig).getName() : workerConfig.originalsStrings().get(workerConverterConfig); + final String connectorClass = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + final String connectorVersion = connectorProps.get(ConnectorConfig.CONNECTOR_VERSION); + String type = null; + if (connectorClass == null || (connectorConverter == null && workerConverter == null)) { + return new ConverterDefaults(null, null); + } + // update the default of connector converter based on if the worker converter is provided + type = workerConverter; + + String version = null; + if (connectorConverter != null) { + version = fetchPluginVersion(plugins, connectorConverter, connectorVersion, connectorConverter); + } else { + version = workerConfig.originalsStrings().get(workerConverterVersionConfig); + if (version == null) { + version = plugins.latestVersion(workerConverter); + } + } + return new ConverterDefaults(type, version); + } + + private static void updateKeyDefault(ConfigDef configDef, String versionConfigKey, String versionDefault) { + ConfigDef.ConfigKey key = configDef.configKeys().get(versionConfigKey); + if (key == null) { + return; + } + configDef.configKeys().put(versionConfigKey, new ConfigDef.ConfigKey( + versionConfigKey, key.type, versionDefault, key.validator, key.importance, key.documentation, key.group, key.orderInGroup, key.width, key.displayName, key.dependents, key.recommender, false + )); + } + + @SuppressWarnings("unchecked") + private static String fetchPluginVersion(Plugins plugins, String connectorClass, String connectorVersion, String pluginName) { + if (pluginName == null) { + return null; + } + try { + VersionRange range = PluginUtils.connectorVersionRequirement(connectorVersion); + return plugins.pluginVersion(pluginName, plugins.pluginLoader(connectorClass, range)); + } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { + // these errors should be captured in other places, so we can ignore them here + log.warn("Failed to determine default plugin version for {}", connectorClass, e); + } + return null; + } + /** * An abstraction over "enrichable plugins" ({@link Transformation}s and {@link Predicate}s) used for computing the * contribution to a Connectors ConfigDef. 
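To make the A/B/C cases described in the converterDefaults() comment above easier to follow, here is a condensed sketch of the precedence it encodes for the default converter version. This is a simplification: the class, method, and parameter names below are invented for illustration and do not match the actual helpers, which resolve the bundled and latest versions through the Plugins/PluginUtils calls shown above.

    // Condensed sketch of the default-version precedence described in the comment above.
    final class ConverterVersionDefaultSketch {
        static String defaultConverterVersion(
                String connectorConverter,          // converter class set in the connector config, may be null
                String connectorConverterVersion,   // converter version set in the connector config, may be null
                String workerConverter,             // converter class set in the worker config, may be null
                String workerConverterVersion,      // converter version set in the worker config, may be null
                String versionBundledWithConnector, // version packaged alongside the connector plugin, may be null
                String latestAvailableVersion) {    // latest version known to the plugin scanner
            if (connectorConverter != null) {
                // Case A: the converter comes from the connector config.
                if (connectorConverterVersion != null) {
                    return connectorConverterVersion;              // A.2: an explicit version wins
                }
                return versionBundledWithConnector != null
                        ? versionBundledWithConnector              // A.1: packaged with the connector
                        : latestAvailableVersion;                  // A.1: otherwise the latest version
            }
            if (workerConverter != null) {
                // Case B: the converter only comes from the worker config.
                return workerConverterVersion != null ? workerConverterVersion : latestAvailableVersion;
            }
            // Case C: no converter configured anywhere, so no default version can be derived.
            return null;
        }
    }

As the comment notes, KAFKA-18119 means service-loaded converters defined in the connector config do not yet follow this order at runtime, but the default version displayed by validation is still computed this way.
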
@@ -455,14 +595,14 @@ void enrich(ConfigDef newDef) { final String typeConfig = prefix + "type"; final ConfigDef.Validator typeValidator = ConfigDef.LambdaValidator.with( - (String name, Object value) -> { - validateProps(prefix); - // The value will be null if the class couldn't be found; no point in performing follow-up validation - if (value != null) { - getConfigDefFromConfigProvidingClass(typeConfig, (Class) value); - } - }, - () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); + (String name, Object value) -> { + validateProps(prefix); + // The value will be null if the class couldn't be found; no point in performing follow-up validation + if (value != null) { + getConfigDefFromConfigProvidingClass(typeConfig, (Class) value); + } + }, + () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); newDef.define(typeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE, typeValidator, Importance.HIGH, "Class for the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, baseClass.getSimpleName() + " type for " + alias, @@ -475,7 +615,8 @@ void enrich(ConfigDef newDef) { } /** Subclasses can add extra validation of the {@link #props}. */ - protected void validateProps(String prefix) { } + protected void validateProps(String prefix) { + } /** * Populates the ConfigDef according to the configs returned from {@code configs()} method of class @@ -486,7 +627,6 @@ protected ConfigDef populateConfigDef(String typeConfig) { try { configDefsForClass(typeConfig) .forEach(entry -> configDef.define(entry.getValue())); - } catch (ConfigException e) { if (requireFullConfig) { throw e; @@ -533,10 +673,10 @@ ConfigDef getConfigDefFromConfigProvidingClass(String key, Class cls) { ConfigDef configDef = config(pluginInstance); if (null == configDef) { throw new ConnectException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - cls.getName() - ) + String.format( + "%s.config() must return a ConfigDef that is not null.", + cls.getName() + ) ); } return configDef; @@ -576,4 +716,34 @@ public boolean visible(String name, Map parsedConfig) { } } + private static class ConverterDefaults { + private final String type; + private final String version; + + public ConverterDefaults(String type, String version) { + this.type = type; + this.version = version; + } + + public String type() { + return type; + } + + public String version() { + return version; + } + } + + public static class PluginVersionValidator implements ConfigDef.Validator { + + @Override + public void ensureValid(String name, Object value) { + + try { + PluginUtils.connectorVersionRequirement((String) value); + } catch (InvalidVersionSpecificationException e) { + throw new ConfigException(name, value, e.getMessage()); + } + } + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java index 52be401bbfaba..859e3f2728e12 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java @@ -32,6 +32,8 @@ import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; +import org.apache.maven.artifact.versioning.VersionRange; + import java.util.Collection; import java.util.List; import java.util.Map; @@ -322,6 +324,8 @@ default void validateConnectorConfig(Map connectorConfig, Callba */ 
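For context on the versioned overload added to Herder just below: callers supply a Maven-style version range (or null to mean the latest installed version). A hypothetical call site might look like the fragment below; the connector class name and the "[3.8,4.0)" range are placeholders, `herder` is assumed to be an existing Herder instance, and the fragment is a sketch rather than code from this change.

    import org.apache.kafka.connect.runtime.Herder;
    import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo;
    import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException;
    import org.apache.maven.artifact.versioning.VersionRange;
    import java.util.List;

    // Sketch: resolve a Maven-style version range and fetch the plugin's config keys at that version.
    static List<ConfigKeyInfo> describePlugin(Herder herder) throws InvalidVersionSpecificationException {
        VersionRange range = VersionRange.createFromVersionSpec("[3.8,4.0)");
        return herder.connectorPluginConfig("org.example.MySinkConnector", range);
    }
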
List connectorPluginConfig(String pluginName); + List connectorPluginConfig(String pluginName, VersionRange version); + /** * Get the current offsets for a connector. * @param connName the name of the connector whose offsets are to be retrieved diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java index 2ab7dfa089763..4584255e23132 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java @@ -73,19 +73,29 @@ public class SinkConnectorConfig extends ConnectorConfig { "keys, all error context header keys will start with __connect.errors."; private static final String DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY = "Enable Error Context Headers"; - static final ConfigDef CONFIG = ConnectorConfig.configDef() - .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) - .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) - .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) - .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) - .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); + private static ConfigDef configDef(ConfigDef baseConfigs) { + return baseConfigs + .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) + .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) + .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) + .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) + .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); + } public static ConfigDef configDef() { - return CONFIG; + return configDef(ConnectorConfig.configDef()); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return 
configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); } public SinkConnectorConfig(Plugins plugins, Map props) { - super(plugins, CONFIG, props); + super(plugins, configDef(), props); } /** @@ -206,6 +216,6 @@ public boolean enableErrantRecordReporter() { } public static void main(String[] args) { - System.out.println(CONFIG.toHtml(4, config -> "sinkconnectorconfigs_" + config)); + System.out.println(configDef().toHtml(4, config -> "sinkconnectorconfigs_" + config)); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java index bc797563b10dd..effa31353764c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java @@ -125,10 +125,10 @@ private static class EnrichedSourceConnectorConfig extends ConnectorConfig { private final EnrichedSourceConnectorConfig enrichedSourceConfig; private final String offsetsTopic; - public static ConfigDef configDef() { + private static ConfigDef configDef(ConfigDef baseConfigDef) { ConfigDef.Validator atLeastZero = ConfigDef.Range.atLeast(0); int orderInGroup = 0; - return new ConfigDef(ConnectorConfig.configDef()) + return new ConfigDef(baseConfigDef) .define( TOPIC_CREATION_GROUPS_CONFIG, ConfigDef.Type.LIST, @@ -203,6 +203,18 @@ public static ConfigDef configDef() { OFFSETS_TOPIC_DISPLAY); } + public static ConfigDef configDef() { + return configDef(ConnectorConfig.configDef()); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); + } + + public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { + return configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); + } + public static ConfigDef embedDefaultGroup(ConfigDef baseConfigDef) { String defaultGroup = "default"; ConfigDef newDefaultDef = new ConfigDef(baseConfigDef); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java index 591e9816a7a50..1f97a907b642e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java @@ -278,6 +278,10 @@ public void stop() { ThreadUtils.shutdownExecutorServiceQuietly(executor, EXECUTOR_SHUTDOWN_TERMINATION_TIMEOUT_MS, TimeUnit.MILLISECONDS); } + public WorkerConfig config() { + return config; + } + /** * Start a connector managed by this worker. 
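Both fetchPluginVersion above and the new connectorPluginConfig(pluginName, VersionRange) overload on Herder take a version spec that is ultimately parsed into a Maven VersionRange via PluginUtils.connectorVersionRequirement (amended later in this patch so that a bare version is bracketed into a hard requirement). For reference, here is a small self-contained sketch of how maven-artifact treats the two styles of spec; it is illustrative only, not part of the patch, and the version numbers are arbitrary:

    import org.apache.maven.artifact.versioning.DefaultArtifactVersion;
    import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException;
    import org.apache.maven.artifact.versioning.VersionRange;

    public class VersionSpecSketch {
        public static void main(String[] args) throws InvalidVersionSpecificationException {
            // A bare version is only a "recommended" version: it carries no restrictions.
            VersionRange soft = VersionRange.createFromVersionSpec("3.1.0");
            System.out.println(soft.hasRestrictions());                                    // false

            // Enclosing it in brackets turns it into a hard requirement on exactly that version.
            VersionRange hard = VersionRange.createFromVersionSpec("[3.1.0]");
            System.out.println(hard.containsVersion(new DefaultArtifactVersion("3.1.0"))); // true
            System.out.println(hard.containsVersion(new DefaultArtifactVersion("3.2.0"))); // false

            // Explicit ranges behave as expected.
            VersionRange range = VersionRange.createFromVersionSpec("[3.0,4.0)");
            System.out.println(range.containsVersion(new DefaultArtifactVersion("3.5.1"))); // true
        }
    }

This is why the helper wraps an unbracketed version in "[...]": without the brackets the spec is merely a recommendation and would not pin the plugin to that exact version.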
* diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java index ca188ffd97af7..a68cdb4ea03d0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java @@ -70,6 +70,8 @@ public class WorkerConfig extends AbstractConfig { public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; public static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC; + public static final String PLUGIN_VERSION_SUFFIX = "plugin.version"; + public static final String KEY_CONVERTER_CLASS_CONFIG = "key.converter"; public static final String KEY_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -77,6 +79,10 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; + public static final String KEY_CONVERTER_VERSION = "key.converter." + PLUGIN_VERSION_SUFFIX; + public static final String KEY_CONVERTER_VERSION_DEFAULT = null; + public static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; + public static final String VALUE_CONVERTER_CLASS_CONFIG = "value.converter"; public static final String VALUE_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -84,6 +90,10 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; + public static final String VALUE_CONVERTER_VERSION = "value.converter." + PLUGIN_VERSION_SUFFIX; + public static final String VALUE_CONVERTER_VERSION_DEFAULT = null; + public static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; + public static final String HEADER_CONVERTER_CLASS_CONFIG = "header.converter"; public static final String HEADER_CONVERTER_CLASS_DOC = "HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -93,6 +103,10 @@ public class WorkerConfig extends AbstractConfig { " header values to strings and deserialize them by inferring the schemas."; public static final String HEADER_CONVERTER_CLASS_DEFAULT = SimpleHeaderConverter.class.getName(); + public static final String HEADER_CONVERTER_VERSION = "header.converter." 
+ PLUGIN_VERSION_SUFFIX; + public static final String HEADER_CONVERTER_VERSION_DEFAULT = null; + public static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; + public static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG = "task.shutdown.graceful.timeout.ms"; private static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC = @@ -200,8 +214,12 @@ protected static ConfigDef baseConfigDef() { CLIENT_DNS_LOOKUP_DOC) .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_CONVERTER_CLASS_DOC) + .define(KEY_CONVERTER_VERSION, Type.STRING, + KEY_CONVERTER_VERSION_DEFAULT, Importance.LOW, KEY_CONVERTER_VERSION_DOC) .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_CONVERTER_CLASS_DOC) + .define(VALUE_CONVERTER_VERSION, Type.STRING, + VALUE_CONVERTER_VERSION_DEFAULT, Importance.LOW, VALUE_CONVERTER_VERSION_DOC) .define(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Type.LONG, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT, Importance.LOW, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC) @@ -237,6 +255,8 @@ protected static ConfigDef baseConfigDef() { .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, Importance.LOW, HEADER_CONVERTER_CLASS_DOC) + .define(HEADER_CONVERTER_VERSION, Type.STRING, + HEADER_CONVERTER_VERSION_DEFAULT, Importance.LOW, HEADER_CONVERTER_VERSION_DOC) .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, Collections.emptyList(), Importance.LOW, CONFIG_PROVIDERS_DOC) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java index ff7a9d3149d58..0d2a664cbc251 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java @@ -112,8 +112,9 @@ import javax.crypto.KeyGenerator; import javax.crypto.SecretKey; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; + +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java index 2ca7979a52636..6a85043928751 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java @@ -139,6 +139,23 @@ String latestVersion(String classOrAlias) { return inner.lastKey().version(); } + String versionInLocation(String classOrAlias, String location) { + if (classOrAlias == null) { + return null; + } + String fullName = aliases.getOrDefault(classOrAlias, classOrAlias); + SortedMap, ClassLoader> inner = pluginLoaders.get(fullName); + if (inner == null) { + return null; + } + for (Map.Entry, ClassLoader> entry : inner.entrySet()) { + if (entry.getKey().location().equals(location)) { + return entry.getKey().version(); + } + } + return null; + } + private ClassLoader findPluginLoader( SortedMap, ClassLoader> loaders, String pluginName, @@ -226,8 +243,12 @@ private void verifyClasspathVersionedPlugin(String name, Class plugin, Versio )); 
} - List> classpathPlugins = scannedPlugin.keySet().stream() + // if a plugin implements two interfaces (like JsonConverter implements both converter and header converter) + // it will have two entries under classpath, one for each scan. Hence, we count distinct by version. + List classpathPlugins = scannedPlugin.keySet().stream() .filter(pluginDesc -> pluginDesc.location().equals("classpath")) + .map(PluginDesc::version) + .distinct() .collect(Collectors.toList()); if (classpathPlugins.size() > 1) { @@ -239,7 +260,7 @@ private void verifyClasspathVersionedPlugin(String name, Class plugin, Versio } else if (classpathPlugins.isEmpty()) { throw new VersionedPluginLoadingException("Invalid plugin found in classpath"); } else { - pluginVersion = classpathPlugins.get(0).version(); + pluginVersion = classpathPlugins.get(0); if (!range.containsVersion(new DefaultArtifactVersion(pluginVersion))) { throw new VersionedPluginLoadingException(String.format( "Plugin %s has version %s which does not match the required version range %s", diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java index ff575e8edf890..1fecb59ddb579 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java @@ -60,8 +60,9 @@ public String toString() { ", location='" + location + '\'' + '}'; } + @JsonIgnore - DefaultArtifactVersion encodedVersion() { + public DefaultArtifactVersion encodedVersion() { return encodedVersion; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java index 56567b3bee709..ae6d3ba3a1cd4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java @@ -493,6 +493,7 @@ private static Collection distinctUrls(Collection urls) { } return distinct.values(); } + public static VersionRange connectorVersionRequirement(String version) throws InvalidVersionSpecificationException { if (version == null || version.equals("latest")) { return null; @@ -500,13 +501,13 @@ public static VersionRange connectorVersionRequirement(String version) throws In version = version.trim(); // check first if the given version is valid - VersionRange.createFromVersionSpec(version); + VersionRange range = VersionRange.createFromVersionSpec(version); - // now if the version is not enclosed we consider it as a hard requirement and enclose it in [] - if (!version.startsWith("[") && !version.startsWith("(")) { - version = "[" + version + "]"; + if (range.hasRestrictions()) { + return range; } + // now if the version is not enclosed we consider it as a hard requirement and enclose it in [] + version = "[" + version + "]"; return VersionRange.createFromVersionSpec(version); } - } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java index 28c1f80c61815..b8027d71d5675 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java @@ -269,6 
+269,13 @@ public String latestVersion(String classOrAlias) { return delegatingLoader.latestVersion(classOrAlias); } + public String pluginVersion(String classOrAlias, ClassLoader sourceLoader) { + if (!(sourceLoader instanceof PluginClassLoader)) { + return latestVersion(classOrAlias); + } + return delegatingLoader.versionInLocation(classOrAlias, ((PluginClassLoader) sourceLoader).location()); + } + public DelegatingClassLoader delegatingLoader() { return delegatingLoader; } @@ -278,7 +285,7 @@ public ClassLoader connectorLoader(String connectorClassOrAlias) { return delegatingLoader.loader(connectorClassOrAlias); } - public ClassLoader pluginLoader(String classOrAlias, VersionRange range) throws ClassNotFoundException, VersionedPluginLoadingException { + public ClassLoader pluginLoader(String classOrAlias, VersionRange range) { return delegatingLoader.loader(classOrAlias, range); } @@ -298,7 +305,7 @@ public Set> sinkConnectors() { return scanResult.sinkConnectors(); } - public Set> sinkConnectors(String connectorClassOrAlias) { + Set> sinkConnectors(String connectorClassOrAlias) { return pluginsOfClass(connectorClassOrAlias, scanResult.sinkConnectors()); } @@ -306,7 +313,7 @@ public Set> sourceConnectors() { return scanResult.sourceConnectors(); } - public Set> sourceConnectors(String connectorClassOrAlias) { + Set> sourceConnectors(String connectorClassOrAlias) { return pluginsOfClass(connectorClassOrAlias, scanResult.sourceConnectors()); } @@ -367,6 +374,13 @@ public Object newPlugin(String classOrAlias, VersionRange range) throws Versione return newPlugin(klass); } + public Object newPlugin(String classOrAlias, VersionRange range, ClassLoader sourceLoader) throws ClassNotFoundException { + if (range == null && sourceLoader instanceof PluginClassLoader) { + sourceLoader.loadClass(classOrAlias); + } + return newPlugin(classOrAlias, range); + } + public Connector newConnector(String connectorClassOrAlias) { Class klass = connectorClass(connectorClassOrAlias); return newPlugin(klass); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java new file mode 100644 index 0000000000000..8cf209ac08c91 --- /dev/null +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginsRecommenders.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.connect.runtime.isolation; + +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.runtime.ConnectorConfig; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class PluginsRecommenders { + + private final Plugins plugins; + private final ConverterPluginRecommender converterPluginRecommender; + private final ConnectorPluginVersionRecommender connectorPluginVersionRecommender; + private final HeaderConverterPluginRecommender headerConverterPluginRecommender; + private final KeyConverterPluginVersionRecommender keyConverterPluginVersionRecommender; + private final ValueConverterPluginVersionRecommender valueConverterPluginVersionRecommender; + private final HeaderConverterPluginVersionRecommender headerConverterPluginVersionRecommender; + + public PluginsRecommenders() { + this(null); + } + + public PluginsRecommenders(Plugins plugins) { + this.plugins = plugins; + this.converterPluginRecommender = new ConverterPluginRecommender(); + this.connectorPluginVersionRecommender = new ConnectorPluginVersionRecommender(); + this.headerConverterPluginRecommender = new HeaderConverterPluginRecommender(); + this.keyConverterPluginVersionRecommender = new KeyConverterPluginVersionRecommender(); + this.valueConverterPluginVersionRecommender = new ValueConverterPluginVersionRecommender(); + this.headerConverterPluginVersionRecommender = new HeaderConverterPluginVersionRecommender(); + } + + public ConverterPluginRecommender converterPluginRecommender() { + return converterPluginRecommender; + } + + public ConnectorPluginVersionRecommender connectorPluginVersionRecommender() { + return connectorPluginVersionRecommender; + } + + public HeaderConverterPluginRecommender headerConverterPluginRecommender() { + return headerConverterPluginRecommender; + } + + public KeyConverterPluginVersionRecommender keyConverterPluginVersionRecommender() { + return keyConverterPluginVersionRecommender; + } + + public ValueConverterPluginVersionRecommender valueConverterPluginVersionRecommender() { + return valueConverterPluginVersionRecommender; + } + + public HeaderConverterPluginVersionRecommender headerConverterPluginVersionRecommender() { + return headerConverterPluginVersionRecommender; + } + + public class ConnectorPluginVersionRecommender implements ConfigDef.Recommender { + + @SuppressWarnings({"unchecked", "rawtypes"}) + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + String connectorClassOrAlias = (String) parsedConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + if (connectorClassOrAlias == null) { + //should never happen + return Collections.emptyList(); + } + List sourceConnectors = plugins.sourceConnectors(connectorClassOrAlias).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + if (!sourceConnectors.isEmpty()) { + return sourceConnectors; + } + return plugins.sinkConnectors(connectorClassOrAlias).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return parsedConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG) != null; + } + + } + + public class ConverterPluginRecommender implements ConfigDef.Recommender { + + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return 
Collections.emptyList(); + } + return plugins.converters().stream() + .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return true; + } + } + + public class HeaderConverterPluginRecommender implements ConfigDef.Recommender { + + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + return plugins.headerConverters().stream() + .map(PluginDesc::pluginClass).distinct().collect(Collectors.toList()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return true; + } + } + + public abstract class ConverterPluginVersionRecommender implements ConfigDef.Recommender { + + protected Function> recommendations() { + return converterClass -> plugins.converters(converterClass).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + + protected abstract String converterConfig(); + + @SuppressWarnings({"rawtypes"}) + @Override + public List validValues(String name, Map parsedConfig) { + if (plugins == null) { + return Collections.emptyList(); + } + if (parsedConfig.get(converterConfig()) == null) { + return Collections.emptyList(); + } + Class converterClass = (Class) parsedConfig.get(converterConfig()); + return recommendations().apply(converterClass.getName()); + } + + @Override + public boolean visible(String name, Map parsedConfig) { + return parsedConfig.get(converterConfig()) != null; + } + } + + public class KeyConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; + } + + } + + public class ValueConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; + } + } + + public class HeaderConverterPluginVersionRecommender extends ConverterPluginVersionRecommender { + + @Override + protected String converterConfig() { + return ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; + } + + @Override + protected Function> recommendations() { + return converterClass -> plugins.headerConverters(converterClass).stream() + .map(PluginDesc::version).distinct().collect(Collectors.toList()); + } + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java index bcd4fa18fc29c..ca2ab18d43b43 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestConfigurable.java @@ -24,8 +24,8 @@ import java.util.Map; import java.util.Objects; -import javax.ws.rs.core.Configurable; -import javax.ws.rs.core.Configuration; +import jakarta.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configuration; /** * The implementation delegates to {@link ResourceConfig} so that we can handle duplicate diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java index 8098f8c97cc53..1990ebdf36926 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java 
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java @@ -20,7 +20,7 @@ import org.apache.kafka.connect.health.ConnectClusterState; import org.apache.kafka.connect.rest.ConnectRestExtensionContext; -import javax.ws.rs.core.Configurable; +import jakarta.ws.rs.core.Configurable; public class ConnectRestExtensionContextImpl implements ConnectRestExtensionContext { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java index dd38f769fe8ff..4dedc7289b8f4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java @@ -33,9 +33,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; public class HerderRequestHandler { @@ -113,6 +113,7 @@ public T completeOrForwardRequest(FutureCallback cb, } String forwardUrl = uriBuilder.build().toString(); log.debug("Forwarding request {} {} {}", forwardUrl, method, body); + // TODO, we may need to set the request timeout as Idle timeout on the HttpClient return translator.translate(restClient.httpRequest(forwardUrl, method, headers, body, resultType)); } else { log.error("Request '{} {}' failed because it couldn't find the target Connect worker within two hops (between workers).", diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java index 6fe4134d1c52a..902187a83fc8a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignature.java @@ -20,7 +20,7 @@ import org.apache.kafka.connect.runtime.distributed.Crypto; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.Request; import java.security.InvalidKeyException; import java.security.MessageDigest; @@ -31,7 +31,8 @@ import javax.crypto.Mac; import javax.crypto.SecretKey; -import javax.ws.rs.core.HttpHeaders; + +import jakarta.ws.rs.core.HttpHeaders; public class InternalRequestSignature { @@ -59,8 +60,10 @@ public static void addToRequest(Crypto crypto, SecretKey key, byte[] requestBody throw new ConnectException(e); } byte[] requestSignature = sign(mac, key, requestBody); - request.header(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature)) - .header(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm); + request.headers(field -> { + field.add(InternalRequestSignature.SIGNATURE_HEADER, Base64.getEncoder().encodeToString(requestSignature)); + field.add(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER, signatureAlgorithm); + }); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index a6db20ce64e54..511f7f9f2c7a2 
100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -26,13 +26,15 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.client.util.StringContentProvider; +import org.eclipse.jetty.client.Request; +import org.eclipse.jetty.client.StringRequestContent; +import org.eclipse.jetty.client.transport.HttpClientTransportDynamic; import org.eclipse.jetty.http.HttpField; import org.eclipse.jetty.http.HttpFields; import org.eclipse.jetty.http.HttpStatus; +import org.eclipse.jetty.io.ClientConnector; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +48,9 @@ import java.util.concurrent.TimeoutException; import javax.crypto.SecretKey; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; + +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.Response; /** * Client for outbound REST requests to other members of a Connect cluster @@ -65,7 +68,15 @@ public RestClient(AbstractConfig config) { // VisibleForTesting HttpClient httpClient(SslContextFactory.Client sslContextFactory) { - return sslContextFactory != null ? new HttpClient(sslContextFactory) : new HttpClient(); + final HttpClient client; + if (sslContextFactory != null) { + ClientConnector clientConnector = new ClientConnector(); + clientConnector.setSslContextFactory(sslContextFactory); + client = new HttpClient(new HttpClientTransportDynamic(clientConnector)); + } else { + client = new HttpClient(); + } + return client; } /** @@ -162,7 +173,7 @@ private HttpResponse httpRequest(HttpClient client, String url, String me addHeadersToRequest(headers, req); if (serializedBody != null) { - req.content(new StringContentProvider(serializedBody, StandardCharsets.UTF_8), "application/json"); + req.body(new StringRequestContent("application/json", serializedBody, StandardCharsets.UTF_8)); } if (sessionKey != null && requestSignatureAlgorithm != null) { @@ -220,7 +231,7 @@ private static void addHeadersToRequest(HttpHeaders headers, Request req) { if (headers != null) { String credentialAuthorization = headers.getHeaderString(HttpHeaders.AUTHORIZATION); if (credentialAuthorization != null) { - req.header(HttpHeaders.AUTHORIZATION, credentialAuthorization); + req.headers(field -> field.add(HttpHeaders.AUTHORIZATION, credentialAuthorization)); } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java index 9468166763cea..b6c7690a51d79 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java @@ -28,8 +28,12 @@ import org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper; import org.apache.kafka.connect.runtime.rest.util.SSLUtils; -import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; +import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider; +import org.eclipse.jetty.ee10.servlet.FilterHolder; +import org.eclipse.jetty.ee10.servlet.ServletContextHandler; +import 
org.eclipse.jetty.ee10.servlet.ServletHolder; +import org.eclipse.jetty.ee10.servlets.HeaderFilter; import org.eclipse.jetty.server.Connector; import org.eclipse.jetty.server.CustomRequestLog; import org.eclipse.jetty.server.Handler; @@ -37,12 +41,8 @@ import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.server.Slf4jRequestLogWriter; import org.eclipse.jetty.server.handler.ContextHandlerCollection; +import org.eclipse.jetty.server.handler.CrossOriginHandler; import org.eclipse.jetty.server.handler.StatisticsHandler; -import org.eclipse.jetty.servlet.FilterHolder; -import org.eclipse.jetty.servlet.ServletContextHandler; -import org.eclipse.jetty.servlet.ServletHolder; -import org.eclipse.jetty.servlets.CrossOriginFilter; -import org.eclipse.jetty.servlets.HeaderFilter; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.glassfish.hk2.utilities.Binder; import org.glassfish.hk2.utilities.binding.AbstractBinder; @@ -60,12 +60,13 @@ import java.util.EnumSet; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; -import javax.servlet.DispatcherType; -import javax.ws.rs.core.UriBuilder; +import jakarta.servlet.DispatcherType; +import jakarta.ws.rs.core.UriBuilder; /** * Embedded server for the REST API that provides the control plane for Kafka Connect workers. @@ -189,6 +190,9 @@ public final Connector createConnector(String listener, boolean isAdmin) { connector.setPort(port); + // TODO: do we need this? + connector.setIdleTimeout(requestTimeout.timeoutMs()); + return connector; } @@ -263,20 +267,21 @@ protected final void initializeResources() { ServletHolder adminServletHolder = new ServletHolder(new ServletContainer(adminResourceConfig)); adminContext.setContextPath("/"); adminContext.addServlet(adminServletHolder, "/*"); - adminContext.setVirtualHosts(new String[]{"@" + ADMIN_SERVER_CONNECTOR_NAME}); + adminContext.setVirtualHosts(List.of("@" + ADMIN_SERVER_CONNECTOR_NAME)); contextHandlers.add(adminContext); } String allowedOrigins = config.allowedOrigins(); if (!Utils.isBlank(allowedOrigins)) { - FilterHolder filterHolder = new FilterHolder(new CrossOriginFilter()); - filterHolder.setName("cross-origin"); - filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_ORIGINS_PARAM, allowedOrigins); + CrossOriginHandler crossOriginHandler = new CrossOriginHandler(); + crossOriginHandler.setAllowedOriginPatterns(Set.of(allowedOrigins.split(","))); String allowedMethods = config.allowedMethods(); if (!Utils.isBlank(allowedMethods)) { - filterHolder.setInitParameter(CrossOriginFilter.ALLOWED_METHODS_PARAM, allowedMethods); + crossOriginHandler.setAllowedMethods(Set.of(allowedMethods.split(","))); } - context.addFilter(filterHolder, "/*", EnumSet.of(DispatcherType.REQUEST)); + // Setting to true matches the previously used CrossOriginFilter + crossOriginHandler.setDeliverPreflightRequests(true); + context.insertHandler(crossOriginHandler); } String headerConfig = config.responseHeaders(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java index 33bbb04b3f75c..1e33732dc58db 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/BadRequestException.java 
@@ -16,7 +16,7 @@ */ package org.apache.kafka.connect.runtime.rest.errors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; public class BadRequestException extends ConnectRestException { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java index 9ce3e9e74d115..91c337c234b99 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java @@ -23,14 +23,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; -import javax.ws.rs.ext.ExceptionMapper; +import jakarta.ws.rs.WebApplicationException; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriInfo; +import jakarta.ws.rs.ext.ExceptionMapper; /** - * Maps uncaught exceptions thrown while handling REST requests to appropriate {@link javax.ws.rs.core.Response}s + * Maps uncaught exceptions thrown while handling REST requests to appropriate {@link jakarta.ws.rs.core.Response}s */ public class ConnectExceptionMapper implements ExceptionMapper { private static final Logger log = LoggerFactory.getLogger(ConnectExceptionMapper.class); @@ -49,7 +49,7 @@ public Response toResponse(Exception exception) { .build(); } - if (exception instanceof NotFoundException || exception instanceof javax.ws.rs.NotFoundException) { + if (exception instanceof NotFoundException || exception instanceof jakarta.ws.rs.NotFoundException) { return Response.status(Response.Status.NOT_FOUND) .entity(new ErrorMessage(Response.Status.NOT_FOUND.getStatusCode(), exception.getMessage())) .build(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java index f45f72ddd8bd3..0d45ea578be86 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectRestException.java @@ -18,7 +18,7 @@ import org.apache.kafka.connect.errors.ConnectException; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; public class ConnectRestException extends ConnectException { private final int statusCode; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java index 6de327bf5578b..46c78027f251f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java @@ -20,6 +20,7 @@ import org.apache.kafka.connect.runtime.Herder; import org.apache.kafka.connect.runtime.isolation.PluginDesc; import org.apache.kafka.connect.runtime.isolation.PluginType; +import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.rest.RestRequestTimeout; import 
org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo; @@ -29,6 +30,9 @@ import org.apache.kafka.connect.util.Stage; import org.apache.kafka.connect.util.StagedTimeoutException; +import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; +import org.apache.maven.artifact.versioning.VersionRange; + import java.time.Instant; import java.util.ArrayList; import java.util.Collection; @@ -41,21 +45,20 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import javax.inject.Inject; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.Consumes; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.ws.rs.BadRequestException; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; @Path("/connector-plugins") @Produces(MediaType.APPLICATION_JSON) @@ -154,9 +157,18 @@ public List listConnectorPlugins( @GET @Path("/{pluginName}/config") @Operation(summary = "Get the configuration definition for the specified pluginName") - public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName) { + public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName, + final @QueryParam("version") @DefaultValue("latest") String version) { + + VersionRange range = null; + try { + range = PluginUtils.connectorVersionRequirement(version); + } catch (InvalidVersionSpecificationException e) { + throw new BadRequestException("Invalid version specification: " + version, e); + } + synchronized (this) { - return herder.connectorPluginConfig(pluginName); + return herder.connectorPluginConfig(pluginName, range); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java index dec053b0a4105..efbf39d790bef 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java @@ -46,29 +46,28 @@ import java.util.List; import java.util.Map; -import javax.inject.Inject; -import javax.servlet.ServletContext; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PATCH; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.core.UriInfo; - import io.swagger.v3.oas.annotations.Operation; import 
io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.servlet.ServletContext; +import jakarta.ws.rs.BadRequestException; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PATCH; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriBuilder; +import jakarta.ws.rs.core.UriInfo; import static org.apache.kafka.connect.runtime.rest.HerderRequestHandler.IdentityTranslator; import static org.apache.kafka.connect.runtime.rest.HerderRequestHandler.Translator; @@ -81,7 +80,7 @@ public class ConnectorsResource { private final Herder herder; private final HerderRequestHandler requestHandler; - @javax.ws.rs.core.Context + @jakarta.ws.rs.core.Context private ServletContext context; private final boolean isTopicTrackingDisabled; private final boolean isTopicTrackingResetDisabled; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java index b9756c381d99a..8ffec431f36de 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java @@ -30,18 +30,17 @@ import java.util.List; import java.util.Map; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.UriInfo; - import io.swagger.v3.oas.annotations.Operation; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.Context; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.UriInfo; /** * Contains endpoints necessary for intra-cluster communication--that is, requests that @@ -66,7 +65,7 @@ protected InternalClusterResource(RestClient restClient, RestRequestTimeout requ /** * @return a {@link Herder} instance that can be used to satisfy the current request; may not be null - * @throws javax.ws.rs.NotFoundException if no such herder can be provided + * @throws jakarta.ws.rs.NotFoundException if no such herder can be provided */ protected abstract Herder herderForRequest(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java index 228c7cd67baf6..760d36a8fc3c3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResource.java @@ -20,8 +20,8 @@ import org.apache.kafka.connect.runtime.rest.RestClient; import 
org.apache.kafka.connect.runtime.rest.RestRequestTimeout; -import javax.inject.Inject; -import javax.ws.rs.Path; +import jakarta.inject.Inject; +import jakarta.ws.rs.Path; @Path("/connectors") public class InternalConnectResource extends InternalClusterResource { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java index 11219f4efe39c..dbbfb46375dfd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java @@ -29,20 +29,19 @@ import java.util.Map; import java.util.Objects; -import javax.inject.Inject; -import javax.ws.rs.Consumes; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; +import jakarta.inject.Inject; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; /** * A set of endpoints to adjust the log levels of runtime loggers. diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java index 8cdad7bc800f0..0af2983395ee0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/RootResource.java @@ -28,14 +28,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import javax.inject.Inject; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - import io.swagger.v3.oas.annotations.Operation; +import jakarta.inject.Inject; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; @Path("/") @Produces(MediaType.APPLICATION_JSON) diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java index 948dcfaf1592c..54aa1bb19084e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java @@ -63,7 +63,7 @@ import java.util.stream.IntStream; import java.util.stream.Stream; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java index 195905f3b76d3..079887c361d24 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java @@ -73,9 +73,9 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; +import static jakarta.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG; import static org.apache.kafka.clients.CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG; import static org.apache.kafka.common.config.AbstractConfig.CONFIG_PROVIDERS_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java index a95352dbdbf0c..9b76bf2ce64cb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java @@ -40,7 +40,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java index a625dc983e8a8..d85ac9a440cb4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java @@ -36,7 +36,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java index 78c9a61406559..e0f395f442508 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java @@ -55,9 +55,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; +import static jakarta.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static 
org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java index 8ccc31baa86c9..1af52dba59f89 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java @@ -36,11 +36,11 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.core.Response; -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; +import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java index 7969471918e1d..86473ffe613b4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java @@ -31,8 +31,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; -import static javax.ws.rs.core.Response.Status.FORBIDDEN; +import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST; +import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java index 69a65ba7bfbde..f13781c8ceabb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java @@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.integration.BlockingConnectorTest.Block.BLOCK_CONFIG; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_START; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java index aa715667d24c4..01a9bcf6373e4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java @@ -50,6 +50,7 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConfigBackingStore; +import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; @@ -177,6 +178,7 @@ public class AbstractHerderTest { private final ConnectorClientConfigOverridePolicy noneConnectorClientConfigOverridePolicy = new NoneConnectorClientConfigOverridePolicy(); @Mock private Worker worker; + @Mock private WorkerConfig workerConfig; @Mock private WorkerConfigTransformer transformer; @Mock private ConfigBackingStore configStore; @Mock private StatusBackingStore statusStore; @@ -206,10 +208,10 @@ public void testConnectorClientConfigOverridePolicyClose() { public void testConnectorStatus() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); + when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); - when(herder.plugins()).thenReturn(plugins); + AbstractHerder herder = testHerder(); when(herder.rawConfig(connectorName)).thenReturn(Collections.singletonMap( ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName() @@ -240,10 +242,10 @@ public void testConnectorStatus() { public void testConnectorStatusMissingPlugin() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("Unable to find class")); + when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("Unable to find class")); - when(herder.plugins()).thenReturn(plugins); + AbstractHerder herder = testHerder(); when(herder.rawConfig(connectorName)) .thenReturn(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); @@ -271,10 +273,11 @@ public void testConnectorStatusMissingPlugin() { @Test public void testConnectorInfo() { - AbstractHerder herder = testHerder(); - when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); - when(herder.plugins()).thenReturn(plugins); + when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); + when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); @@ -310,10 +313,11 @@ public void testResumeConnector() { @Test public void testConnectorInfoMissingPlugin() { - AbstractHerder herder = testHerder(); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); - when(herder.plugins()).thenReturn(plugins); + when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); + when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); @@ -481,7 +485,7 @@ public void testConfigValidationMissingName() { Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); // Base 
connector config has 15 fields, connector's configs add 7 - assertEquals(22, infos.size()); + assertEquals(26, infos.size()); // Missing name should generate an error assertEquals(ConnectorConfig.NAME_CONFIG, infos.get(ConnectorConfig.NAME_CONFIG).configValue().name()); @@ -592,7 +596,7 @@ public void testConfigValidationTransformsExtendResults() { assertEquals(1, result.errorCount()); Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(27, infos.size()); + assertEquals(31, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); @@ -649,7 +653,7 @@ public void testConfigValidationPredicatesExtendResults() { assertEquals(1, result.errorCount()); Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(29, infos.size()); + assertEquals(33, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); assertTrue(infos.get("transforms.xformA.type").configValue().errors().isEmpty()); @@ -710,8 +714,8 @@ public void testConfigValidationPrincipalOnlyOverride() { ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - // Base connector config has 15 fields, connector's configs add 7, and 2 producer overrides - assertEquals(24, result.values().size()); + // Base connector config has 19 fields, connector's configs add 7, and 2 producer overrides + assertEquals(28, result.values().size()); assertTrue(result.values().stream().anyMatch( configInfo -> ackConfigKey.equals(configInfo.configValue().name()) && !configInfo.configValue().errors().isEmpty())); assertTrue(result.values().stream().anyMatch( @@ -1040,8 +1044,8 @@ private void testConnectorPluginConfig( ) throws ClassNotFoundException { AbstractHerder herder = testHerder(); - when(plugins.pluginClass(pluginName)).then(invocation -> newPluginInstance.get().getClass()); - when(plugins.newPlugin(anyString())).then(invocation -> newPluginInstance.get()); + when(plugins.pluginClass(pluginName, null)).then(invocation -> newPluginInstance.get().getClass()); + when(plugins.newPlugin(anyString(), any())).then(invocation -> newPluginInstance.get()); when(herder.plugins()).thenReturn(plugins); List configs = herder.connectorPluginConfig(pluginName); @@ -1060,7 +1064,7 @@ public void testGetConnectorConfigDefWithBadName() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString())).thenThrow(new ClassNotFoundException()); + when(plugins.pluginClass(anyString(), any())).thenThrow(new ClassNotFoundException()); assertThrows(NotFoundException.class, () -> herder.connectorPluginConfig(connName)); } @@ -1070,17 +1074,17 @@ public void testGetConnectorConfigDefWithInvalidPluginType() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString())).thenReturn((Class) Object.class); - when(plugins.newPlugin(anyString())).thenReturn(new DirectoryConfigProvider()); + when(plugins.pluginClass(anyString(), any())).thenReturn((Class) 
Object.class); + when(plugins.newPlugin(anyString(), any())).thenReturn(new DirectoryConfigProvider()); assertThrows(BadRequestException.class, () -> herder.connectorPluginConfig(connName)); } @Test public void testGetConnectorTypeWithMissingPlugin() { String connName = "AnotherPlugin"; - AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); + when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); + AbstractHerder herder = testHerder(); assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); } @@ -1244,15 +1248,14 @@ private AbstractHerder createConfigValidationHerder(Class c private AbstractHerder createConfigValidationHerder(Class connectorClass, ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy, int countOfCallingNewConnector) { - - AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); - // Call to validateConnectorConfig when(worker.configTransformer()).thenReturn(transformer); @SuppressWarnings("unchecked") final ArgumentCaptor> mapArgumentCaptor = ArgumentCaptor.forClass(Map.class); when(transformer.transform(mapArgumentCaptor.capture())).thenAnswer(invocation -> mapArgumentCaptor.getValue()); when(worker.getPlugins()).thenReturn(plugins); + + AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); final Connector connector; try { connector = connectorClass.getConstructor().newInstance(); @@ -1275,14 +1278,17 @@ private AbstractHerder testHerder(ConnectorClientConfigOverridePolicy connectorC .defaultAnswer(CALLS_REAL_METHODS)); } + @SuppressWarnings({"unchecked", "rawtypes"}) private void mockValidationIsolation(String connectorClass, Connector connector) { - when(plugins.newConnector(connectorClass)).thenReturn(connector); - when(plugins.connectorLoader(connectorClass)).thenReturn(classLoader); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(worker.config()).thenReturn(workerConfig); + when(plugins.newConnector(anyString(), any())).thenReturn(connector); + when(plugins.pluginLoader(connectorClass, null)).thenReturn(classLoader); when(plugins.withClassLoader(classLoader)).thenReturn(loaderSwap); } private void verifyValidationIsolation() { - verify(plugins).newConnector(anyString()); + verify(plugins).newConnector(anyString(), any()); verify(plugins).withClassLoader(classLoader); verify(loaderSwap).close(); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java index cb91530439f3d..1324b9a22638c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java @@ -27,7 +27,7 @@ public class SampleSourceConnector extends SourceConnector { - public static final String VERSION = "an entirely different version"; + public static final String VERSION = "some great version"; @Override public String version() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java index 
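
Note on the AbstractHerderTest churn above: the Plugins lookups gained a second, optional version argument (newConnector(name, range), pluginClass(name, range), newPlugin(name, range), pluginLoader(name, range)), and the worker.getPlugins() stub now has to be in place before testHerder() builds the partial mock, because the herder constructor (invoked via useConstructor + CALLS_REAL_METHODS) already consults the plugins. A hedged Mockito sketch of that ordering; only the method names are taken from the hunks above, the rest is illustrative:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.kafka.connect.runtime.Worker;
import org.apache.kafka.connect.runtime.isolation.Plugins;
import org.apache.kafka.connect.source.SourceConnector;

public class VersionedPluginsStubbingSketch {
    public static void main(String[] args) {
        Plugins plugins = mock(Plugins.class);
        Worker worker = mock(Worker.class);

        // 1. Stub the collaborators first; the extra version argument is matched loosely with any().
        when(plugins.newConnector(anyString(), any())).thenReturn(mock(SourceConnector.class));
        when(worker.getPlugins()).thenReturn(plugins);

        // 2. Only then construct the herder partial mock, since its real constructor
        //    already calls worker.getPlugins(); stubbing afterwards would be too late.
        // AbstractHerder herder = testHerder();   // as in the tests above
    }
}
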
6b4d066ca1016..74da1703482af 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java @@ -114,9 +114,9 @@ import javax.crypto.SecretKey; +import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; +import static jakarta.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; import static java.util.Collections.singletonList; -import static javax.ws.rs.core.Response.Status.FORBIDDEN; -import static javax.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; import static org.apache.kafka.connect.runtime.AbstractStatus.State.FAILED; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -320,7 +320,7 @@ public void setUp() throws Exception { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[]{uponShutdown})); - + verify(worker).getPlugins(); configUpdateListener = herder.new ConfigUpdateListener(); rebalanceListener = herder.new RebalanceListener(time); conn1SinkConfig = new SinkConnectorConfig(plugins, CONN1_CONFIG); @@ -3550,7 +3550,7 @@ public void testTaskReconfigurationRetriesWithLeaderRequestForwardingException() herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, noneConnectorClientConfigOverridePolicy, Collections.emptyList(), new MockSynchronousExecutor(), new AutoCloseable[]{})); - + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); when(member.memberId()).thenReturn("member"); @@ -3992,6 +3992,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceDisabled() throws Excepti public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exception { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment when(member.memberId()).thenReturn("leader"); @@ -4057,6 +4058,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio public void testModifyOffsetsSourceConnectorExactlyOnceEnabledZombieFencingFailure() { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); + verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java index 3c99091740088..d0559123b7251 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java @@ -463,9 +463,7 @@ private static ThreadFactory 
threadFactoryWithNamedThreads(String threadPrefix) return r -> { // This is essentially Executors.defaultThreadFactory except with // custom thread names so in order to filter by thread names when debugging - SecurityManager s = System.getSecurityManager(); - Thread t = new Thread((s != null) ? s.getThreadGroup() : - Thread.currentThread().getThreadGroup(), r, + Thread t = new Thread(Thread.currentThread().getThreadGroup(), r, threadPrefix + threadNumber.getAndIncrement(), 0); if (t.isDaemon()) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java index 1ec763a6bb0f3..13727c5b438bb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java @@ -58,7 +58,7 @@ import java.util.HashMap; import java.util.Map; -import javax.ws.rs.core.MediaType; +import jakarta.ws.rs.core.MediaType; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java index 4d37b7e67b76d..394031e0df105 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java @@ -21,21 +21,23 @@ import org.apache.kafka.connect.runtime.distributed.Crypto; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.HttpClient; +import org.eclipse.jetty.client.Request; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.net.URI; import java.security.NoSuchAlgorithmException; import java.util.Base64; import javax.crypto.Mac; import javax.crypto.SecretKey; import javax.crypto.spec.SecretKeySpec; -import javax.ws.rs.core.HttpHeaders; + +import jakarta.ws.rs.core.HttpHeaders; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -113,25 +115,16 @@ public void addToRequestShouldThrowExceptionOnInvalidSignatureAlgorithm() throws @Test public void addToRequestShouldAddHeadersOnValidSignatureAlgorithm() { - Request request = mock(Request.class); - ArgumentCaptor signatureCapture = ArgumentCaptor.forClass(String.class); - ArgumentCaptor signatureAlgorithmCapture = ArgumentCaptor.forClass(String.class); - when(request.header( - eq(InternalRequestSignature.SIGNATURE_HEADER), - signatureCapture.capture() - )).thenReturn(request); - when(request.header( - eq(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER), - signatureAlgorithmCapture.capture() - )).thenReturn(request); + HttpClient httpClient = new HttpClient(); + Request request = httpClient.newRequest(URI.create("http://localhost")); InternalRequestSignature.addToRequest(crypto, KEY, REQUEST_BODY, SIGNATURE_ALGORITHM, request); assertEquals(ENCODED_SIGNATURE, - 
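
Aside on the SynchronizationTest hunk above: System.getSecurityManager() is deprecated for removal (JEP 411), so the custom thread factory now always takes the current thread's group. A standalone sketch of the same idea (illustrative, not part of the patch):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactorySketch {
    // Essentially Executors.defaultThreadFactory with custom names, minus the SecurityManager lookup.
    public static ThreadFactory withPrefix(String threadPrefix) {
        AtomicInteger threadNumber = new AtomicInteger(1);
        return r -> {
            Thread t = new Thread(Thread.currentThread().getThreadGroup(), r,
                    threadPrefix + threadNumber.getAndIncrement(), 0);
            if (t.isDaemon()) {
                t.setDaemon(false);
            }
            return t;
        };
    }

    public static void main(String[] args) throws InterruptedException {
        Thread t = withPrefix("sync-test-").newThread(() -> System.out.println(Thread.currentThread().getName()));
        t.start();
        t.join();
    }
}
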
signatureCapture.getValue(), + request.getHeaders().get(InternalRequestSignature.SIGNATURE_HEADER), "Request should have valid base 64-encoded signature added as header"); assertEquals(SIGNATURE_ALGORITHM, - signatureAlgorithmCapture.getValue(), + request.getHeaders().get(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER), "Request should have provided signature algorithm added as header"); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java index 75e224cfa32bb..b5449daa81202 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestClientTest.java @@ -25,9 +25,9 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.Request; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; @@ -45,7 +45,8 @@ import java.util.stream.Stream; import javax.crypto.SecretKey; -import javax.ws.rs.core.Response; + +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -118,7 +119,7 @@ private static Stream requestExceptions() { private static Request buildThrowingMockRequest(Throwable t) throws ExecutionException, InterruptedException, TimeoutException { Request req = mock(Request.class); - when(req.header(anyString(), anyString())).thenReturn(req); + when(req.headers(any())).thenReturn(req); when(req.send()).thenThrow(t); return req; } @@ -310,7 +311,7 @@ public void testUseSslConfigsOnlyWhenNecessary() throws Exception { public void testHttpRequestInterrupted() throws ExecutionException, InterruptedException, TimeoutException { Request req = mock(Request.class); doThrow(new InterruptedException()).when(req).send(); - doReturn(req).when(req).header(anyString(), anyString()); + doReturn(req).when(req).headers(any()); doReturn(req).when(httpClient).newRequest(anyString()); ConnectRestException e = assertThrows(ConnectRestException.class, () -> httpRequest( httpClient, MOCK_URL, TEST_METHOD, TEST_TYPE, TEST_SIGNATURE_ALGORITHM @@ -323,7 +324,7 @@ public void testHttpRequestInterrupted() throws ExecutionException, InterruptedE private void setupHttpClient(int responseCode, Request req, ContentResponse resp) throws Exception { when(resp.getStatus()).thenReturn(responseCode); when(req.send()).thenReturn(resp); - when(req.header(anyString(), anyString())).thenReturn(req); + when(req.headers(any())).thenReturn(req); when(httpClient.newRequest(anyString())).thenReturn(req); } @@ -356,4 +357,4 @@ public int hashCode() { return Objects.hash(content); } } -} \ No newline at end of file +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java index eb34d619d5cc9..1b8376db635d4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java +++ 
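
Note on the Jetty upgrade reflected in InternalRequestSignatureTest and RestClientTest above, and in EmbeddedConnect further down: the client types moved out of org.eclipse.jetty.client.api, Request.header(name, value) was replaced by Request.headers(consumer), and StringContentProvider/Request.content(...) by StringRequestContent/Request.body(...). A compile-level sketch against the Jetty 12 client; the URL, header name, and payload are placeholders, and the 90s idle timeout is assumed to mirror Connect's default REST request timeout:

import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.eclipse.jetty.client.ContentResponse;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.Request;
import org.eclipse.jetty.client.StringRequestContent;

public class JettyTwelveClientSketch {
    public static void main(String[] args) throws Exception {
        HttpClient httpClient = new HttpClient();
        // EmbeddedConnect raises this so long-polling REST calls are not cut off by the client
        // before the framework responds (see the testPollTimeoutExpiry comment further down).
        httpClient.setIdleTimeout(90_000L);
        httpClient.start();
        try {
            Request request = httpClient.newRequest(URI.create("http://localhost:8083/connectors"))
                    .method("POST")
                    // header(name, value) is gone; headers are edited through a mutable view.
                    .headers(headers -> headers.add("X-Example-Header", "placeholder"));
            // content(StringContentProvider) became body(StringRequestContent).
            request.body(new StringRequestContent("application/json", "{}", StandardCharsets.UTF_8));
            ContentResponse response = request.send();
            System.out.println(response.getStatus());
        } finally {
            httpClient.stop();
        }
    }
}
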
b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java @@ -84,7 +84,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.ws.rs.BadRequestException; +import jakarta.ws.rs.BadRequestException; import static java.util.Arrays.asList; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; @@ -428,14 +428,14 @@ public void testListAllPlugins() { @Test public void testGetConnectorConfigDef() { String connName = ConnectorPluginsResourceTestConnector.class.getName(); - when(herder.connectorPluginConfig(eq(connName))).thenAnswer(answer -> { + when(herder.connectorPluginConfig(eq(connName), eq(null))).thenAnswer(answer -> { List results = new ArrayList<>(); for (ConfigDef.ConfigKey configKey : ConnectorPluginsResourceTestConnector.CONFIG_DEF.configKeys().values()) { results.add(AbstractHerder.convertConfigKey(configKey)); } return results; }); - List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName); + List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName, null); assertEquals(ConnectorPluginsResourceTestConnector.CONFIG_DEF.names().size(), connectorConfigDef.size()); for (String config : ConnectorPluginsResourceTestConnector.CONFIG_DEF.names()) { Optional cki = connectorConfigDef.stream().filter(c -> c.name().equals(config)).findFirst(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java index 700284a9c66ee..9dfead77220f6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java @@ -64,12 +64,12 @@ import java.util.Map; import java.util.Set; -import javax.ws.rs.BadRequestException; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; +import jakarta.ws.rs.BadRequestException; +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.MultivaluedHashMap; +import jakarta.ws.rs.core.MultivaluedMap; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.core.UriInfo; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java index 0e24f86695169..aee85a86c2ab2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java @@ -44,8 +44,9 @@ import java.util.Map; import javax.crypto.Mac; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.UriInfo; + +import jakarta.ws.rs.core.HttpHeaders; +import jakarta.ws.rs.core.UriInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java index 916de425bd984..c73bba8c84368 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java @@ -35,7 +35,7 @@ import java.util.Collections; import java.util.List; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -167,4 +167,4 @@ public void testSetLevelClusterScope() { verify(herder).setClusterLoggerLevel(logger, level); } -} \ No newline at end of file +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java index dfdf081227cf3..459bc58201392 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/RootResourceTest.java @@ -37,7 +37,7 @@ import java.util.concurrent.TimeoutException; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java index a3dc0efef99d4..408f4cb886b29 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java @@ -17,9 +17,12 @@ package org.apache.kafka.connect.runtime.rest.util; import org.apache.kafka.common.config.SslConfigs; +import org.apache.kafka.common.config.types.Password; +import org.apache.kafka.common.network.CertStores; import org.apache.kafka.connect.runtime.rest.RestServerConfig; import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.Arrays; @@ -33,6 +36,22 @@ public class SSLUtilsTest { + private Map sslConfig; + private String keystorePath; + private String truststorePath; + private Password keystorePassword; + private Password truststorePassword; + + @BeforeEach + public void before() throws Exception { + CertStores serverCertStores = new CertStores(true, "localhost"); + sslConfig = serverCertStores.getUntrustingConfig(); + keystorePath = sslConfig.get("ssl.keystore.location").toString(); + truststorePath = sslConfig.get("ssl.truststore.location").toString(); + keystorePassword = (Password) sslConfig.get("ssl.keystore.password"); + truststorePassword = (Password) sslConfig.get("ssl.keystore.password"); + } + @Test public void testGetOrDefault() { String existingKey = "exists"; @@ -47,13 +66,13 @@ public void testGetOrDefault() { } @Test - public void testCreateServerSideSslContextFactory() { + public void testCreateServerSideSslContextFactory() throws Exception { Map configMap = new HashMap<>(); - 
configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -69,8 +88,8 @@ public void testCreateServerSideSslContextFactory() { RestServerConfig config = RestServerConfig.forPublic(null, configMap); SslContextFactory.Server ssl = SSLUtils.createServerSideSslContextFactory(config); - assertEquals("file:///path/to/keystore", ssl.getKeyStorePath()); - assertEquals("file:///path/to/truststore", ssl.getTrustStorePath()); + assertEquals("file://" + keystorePath, ssl.getKeyStorePath()); + assertEquals("file://" + truststorePath, ssl.getTrustStorePath()); assertEquals("SunJSSE", ssl.getProvider()); assertArrayEquals(new String[] {"SSL_RSA_WITH_RC4_128_SHA", "SSL_RSA_WITH_RC4_128_MD5"}, ssl.getIncludeCipherSuites()); assertEquals("SHA1PRNG", ssl.getSecureRandomAlgorithm()); @@ -87,11 +106,11 @@ public void testCreateServerSideSslContextFactory() { @Test public void testCreateClientSideSslContextFactory() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -107,8 +126,8 @@ public void testCreateClientSideSslContextFactory() { RestServerConfig config = RestServerConfig.forPublic(null, configMap); SslContextFactory.Client ssl = SSLUtils.createClientSideSslContextFactory(config); - assertEquals("file:///path/to/keystore", ssl.getKeyStorePath()); - assertEquals("file:///path/to/truststore", ssl.getTrustStorePath()); + assertEquals("file://" + keystorePath, ssl.getKeyStorePath()); + assertEquals("file://" + truststorePath, ssl.getTrustStorePath()); assertEquals("SunJSSE", ssl.getProvider()); assertArrayEquals(new String[] {"SSL_RSA_WITH_RC4_128_SHA", "SSL_RSA_WITH_RC4_128_MD5"}, ssl.getIncludeCipherSuites()); assertEquals("SHA1PRNG", ssl.getSecureRandomAlgorithm()); @@ -123,11 +142,11 @@ public void testCreateClientSideSslContextFactory() { @Test public void testCreateServerSideSslContextFactoryDefaultValues() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - 
configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); @@ -148,11 +167,11 @@ public void testCreateServerSideSslContextFactoryDefaultValues() { @Test public void testCreateClientSideSslContextFactoryDefaultValues() { Map configMap = new HashMap<>(); - configMap.put("ssl.keystore.location", "/path/to/keystore"); - configMap.put("ssl.keystore.password", "123456"); - configMap.put("ssl.key.password", "123456"); - configMap.put("ssl.truststore.location", "/path/to/truststore"); - configMap.put("ssl.truststore.password", "123456"); + configMap.put("ssl.keystore.location", keystorePath); + configMap.put("ssl.keystore.password", keystorePassword.value()); + configMap.put("ssl.key.password", keystorePassword.value()); + configMap.put("ssl.truststore.location", truststorePath); + configMap.put("ssl.truststore.password", truststorePassword.value()); configMap.put("ssl.provider", "SunJSSE"); configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5"); configMap.put("ssl.secure.random.implementation", "SHA1PRNG"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java index c220ca9c70d0c..17a7d7c391a5e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java @@ -38,6 +38,7 @@ import org.apache.kafka.connect.runtime.TaskConfig; import org.apache.kafka.connect.runtime.TaskStatus; import org.apache.kafka.connect.runtime.Worker; +import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.WorkerConfigTransformer; import org.apache.kafka.connect.runtime.distributed.SampleConnectorClientConfigOverridePolicy; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; @@ -56,6 +57,7 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.MemoryConfigBackingStore; +import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; @@ -130,6 +132,8 @@ private enum SourceSink { @Mock protected Worker worker; @Mock + protected WorkerConfig workerConfig; + @Mock protected WorkerConfigTransformer transformer; @Mock private Plugins plugins; @@ -144,9 +148,11 @@ private enum SourceSink { noneConnectorClientConfigOverridePolicy = new SampleConnectorClientConfigOverridePolicy(); public void initialize(boolean mockTransform) { + when(worker.getPlugins()).thenReturn(plugins); herder = mock(StandaloneHerder.class, withSettings() .useConstructor(worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new 
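
Note on the SSLUtilsTest changes above: the hard-coded /path/to/keystore placeholders are replaced with stores that actually exist on disk, generated per test by CertStores (a test utility shipped in the kafka-clients test artifact). A short sketch of that setup, mirroring the @BeforeEach above and assuming the test-jar is on the classpath:

import java.util.Map;

import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.network.CertStores;

public class CertStoresSetupSketch {
    public static void main(String[] args) throws Exception {
        // Generates a self-signed certificate plus keystore/truststore files on disk.
        CertStores serverCertStores = new CertStores(true, "localhost");
        Map<String, Object> sslConfig = serverCertStores.getUntrustingConfig();

        String keystorePath = sslConfig.get("ssl.keystore.location").toString();
        Password keystorePassword = (Password) sslConfig.get("ssl.keystore.password");
        System.out.println("keystore at " + keystorePath + " (password length " + keystorePassword.value().length() + ")");
    }
}
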
MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy, new MockTime()) .defaultAnswer(CALLS_REAL_METHODS)); + verify(worker).getPlugins(); createCallback = new FutureCallback<>(); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); if (mockTransform) @@ -173,6 +179,7 @@ public void testCreateSourceConnector() throws Exception { } @Test + @SuppressWarnings("rawtypes") public void testCreateConnectorFailedValidation() { initialize(false); // Basic validation should be performed and return an error, but should still evaluate the connector's config @@ -185,12 +192,13 @@ public void testCreateConnectorFailedValidation() { final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(worker.config()).thenReturn(workerConfig); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); + when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); when(connectorMock.config()).thenReturn(new ConfigDef()); - ConfigValue validatedValue = new ConfigValue("foo.bar"); when(connectorMock.validate(config)).thenReturn(new Config(singletonList(validatedValue))); @@ -850,6 +858,7 @@ public void testPutTaskConfigs() { } @Test + @SuppressWarnings("rawtypes") public void testCorruptConfig() { initialize(false); Map config = new HashMap<>(); @@ -870,10 +879,12 @@ public void testCorruptConfig() { when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); + when(worker.config()).thenReturn(workerConfig); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(configDef); herder.putConnectorConfig(CONNECTOR_NAME, config, true, createCallback); @@ -1212,6 +1223,7 @@ private static Map taskConfig(SourceSink sourceSink) { return generatedTaskProps; } + @SuppressWarnings("rawtypes") private void expectConfigValidation( SourceSink sourceSink, Map... 
configs @@ -1221,13 +1233,13 @@ private void expectConfigValidation( when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); - when(worker.getPlugins()).thenReturn(plugins); - when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); + when(worker.config()).thenReturn(workerConfig); + when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); + when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); - // Assume the connector should always be created when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString())).thenReturn(connectorMock); + when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(new ConfigDef()); // Set up validation for each config diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java index c30c78ad7160a..8dc22edb86309 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java @@ -37,7 +37,7 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.test.TestUtils.waitForCondition; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java index e76ccf9ed2c3e..b576cda56a75d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java @@ -37,14 +37,15 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.eclipse.jetty.client.ContentResponse; import org.eclipse.jetty.client.HttpClient; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.client.util.StringContentProvider; +import org.eclipse.jetty.client.Request; +import org.eclipse.jetty.client.StringRequestContent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -55,7 +56,9 @@ import java.util.Set; import java.util.stream.Collectors; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; + +import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; abstract class EmbeddedConnect { @@ -81,6 +84,10 @@ protected EmbeddedConnect( this.kafkaCluster = new EmbeddedKafkaCluster(numBrokers, brokerProps, clientProps); this.maskExitProcedures = maskExitProcedures; this.httpClient = new HttpClient(); + // Necessary to prevent the rest request from timing out too early + // Before this change,ConnectWorkerIntegrationTest#testPollTimeoutExpiry() was failing + // because the request was being 
stopped by jetty before the framework responded + this.httpClient.setIdleTimeout(DEFAULT_REST_REQUEST_TIMEOUT_MS); this.assertions = new ConnectAssertions(this); // we should keep the original class loader and set it back after connector stopped since the connector will change the class loader, // and then, the Mockito will use the unexpected class loader to generate the wrong proxy instance, which makes mock failed @@ -992,8 +999,8 @@ protected Response requestHttpMethod(String url, String body, Map headers.forEach(mutable::add)); + req.body(new StringRequestContent("application/json", body, StandardCharsets.UTF_8)); } ContentResponse res = req.send(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java index 66ce78d0d1bab..5678b97bb1314 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java @@ -37,7 +37,7 @@ import java.util.Properties; import java.util.Set; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java index 1c21038e66a1a..ee0cf18212a20 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java @@ -52,6 +52,7 @@ import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -2468,4 +2469,23 @@ public void close() throws Exception { Utils.closeQuietly(runtimeMetrics, "runtime metrics"); log.info("Coordinator runtime closed."); } + + /** + * Util method which returns all the topic partitions for which + * the state machine is in active state. + *
+ * This could be useful if the caller does not have a specific + * target internal topic partition. + * @return List of {@link TopicPartition} whose coordinators are active + */ + public List activeTopicPartitions() { + if (coordinators == null || coordinators.isEmpty()) { + return Collections.emptyList(); + } + + return coordinators.entrySet().stream() + .filter(entry -> entry.getValue().state.equals(CoordinatorState.ACTIVE)) + .map(Map.Entry::getKey) + .toList(); + } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java index 47df8bcae3499..cb8bec3f71c94 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java @@ -107,4 +107,15 @@ CompletableFuture maybeStartTransactionVerification( short producerEpoch, short apiVersion ) throws KafkaException; + + /** + * Delete records from a topic partition until specified offset + * @param tp The partition to delete records from + * @param deleteBeforeOffset Offset to delete until, starting from the beginning + * @throws KafkaException Any KafkaException caught during the operation. + */ + CompletableFuture deleteRecords( + TopicPartition tp, + long deleteBeforeOffset + ) throws KafkaException; } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java index 1f676ad550fc8..a8551f0734bbd 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java @@ -115,6 +115,14 @@ public long append( } } + @Override + public CompletableFuture deleteRecords( + TopicPartition tp, + long deleteBeforeOffset + ) throws KafkaException { + throw new RuntimeException("method not implemented"); + } + @Override public CompletableFuture maybeStartTransactionVerification( TopicPartition tp, diff --git a/core/src/main/scala/kafka/Kafka.scala b/core/src/main/scala/kafka/Kafka.scala index f32f23d3475e7..1c783ca7dc03d 100755 --- a/core/src/main/scala/kafka/Kafka.scala +++ b/core/src/main/scala/kafka/Kafka.scala @@ -61,20 +61,13 @@ object Kafka extends Logging { props } - // For Zk mode, the API forwarding is currently enabled only under migration flag. We can - // directly do a static IBP check to see API forwarding is enabled here because IBP check is - // static in Zk mode. 
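
Note on the coordinator-common additions above: CoordinatorRuntime#activeTopicPartitions lists every internal topic partition whose coordinator state machine is ACTIVE, and PartitionWriter#deleteRecords asynchronously truncates a partition up to a given offset. Together they let a caller prune coordinator records without knowing a specific target partition. A hedged caller-side sketch; the interfaces stand in for the real classes and the offset/error handling are assumptions, not taken from this patch:

import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.kafka.common.TopicPartition;

public class CoordinatorPruningSketch {
    // Hypothetical stand-ins for CoordinatorRuntime and PartitionWriter.
    interface ActivePartitionSource { List<TopicPartition> activeTopicPartitions(); }
    interface RecordDeleter { CompletableFuture<Void> deleteRecords(TopicPartition tp, long deleteBeforeOffset); }

    static void pruneAll(ActivePartitionSource runtime, RecordDeleter writer, long deleteBeforeOffset) {
        // Only ACTIVE coordinators are returned, so no explicit target partition is needed.
        for (TopicPartition tp : runtime.activeTopicPartitions()) {
            writer.deleteRecords(tp, deleteBeforeOffset).whenComplete((ignored, error) -> {
                if (error != null) {
                    System.err.println("Failed to delete records from " + tp + ": " + error);
                }
            });
        }
    }
}
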
- private def enableApiForwarding(config: KafkaConfig) = - config.migrationEnabled && config.interBrokerProtocolVersion.isApiForwardingEnabled - private def buildServer(props: Properties): Server = { val config = KafkaConfig.fromProps(props, doLog = false) if (config.requiresZookeeper) { new KafkaServer( config, Time.SYSTEM, - threadNamePrefix = None, - enableForwarding = enableApiForwarding(config) + threadNamePrefix = None ) } else { new KafkaRaftServer( diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala index 5d886a3040136..e794bceeca73e 100644 --- a/core/src/main/scala/kafka/controller/KafkaController.scala +++ b/core/src/main/scala/kafka/controller/KafkaController.scala @@ -21,7 +21,7 @@ import com.yammer.metrics.core.{Meter, Timer} import java.util.concurrent.TimeUnit import kafka.common._ import kafka.cluster.Broker -import kafka.controller.KafkaController.{ActiveBrokerCountMetricName, ActiveControllerCountMetricName, AlterReassignmentsCallback, ControllerStateMetricName, ElectLeadersCallback, FencedBrokerCountMetricName, GlobalPartitionCountMetricName, GlobalTopicCountMetricName, ListReassignmentsCallback, OfflinePartitionsCountMetricName, PreferredReplicaImbalanceCountMetricName, ReplicasIneligibleToDeleteCountMetricName, ReplicasToDeleteCountMetricName, TopicsIneligibleToDeleteCountMetricName, TopicsToDeleteCountMetricName, UpdateFeaturesCallback, ZkMigrationStateMetricName} +import kafka.controller.KafkaController.{ActiveBrokerCountMetricName, ActiveControllerCountMetricName, AlterReassignmentsCallback, ControllerStateMetricName, ElectLeadersCallback, FencedBrokerCountMetricName, GlobalPartitionCountMetricName, GlobalTopicCountMetricName, ListReassignmentsCallback, OfflinePartitionsCountMetricName, PreferredReplicaImbalanceCountMetricName, ReplicasIneligibleToDeleteCountMetricName, ReplicasToDeleteCountMetricName, TopicsIneligibleToDeleteCountMetricName, TopicsToDeleteCountMetricName, UpdateFeaturesCallback} import kafka.coordinator.transaction.ZkProducerIdManager import kafka.server._ import kafka.server.metadata.ZkFinalizedFeatureCache @@ -42,7 +42,6 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AbstractControlRequest, ApiError, LeaderAndIsrResponse, UpdateFeaturesRequest, UpdateMetadataResponse} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.common.{AdminOperationException, ProducerIdsBlock} import org.apache.kafka.server.metrics.KafkaMetricsGroup @@ -81,11 +80,9 @@ object KafkaController extends Logging { private val ReplicasIneligibleToDeleteCountMetricName = "ReplicasIneligibleToDeleteCount" private val ActiveBrokerCountMetricName = "ActiveBrokerCount" private val FencedBrokerCountMetricName = "FencedBrokerCount" - private val ZkMigrationStateMetricName = "ZkMigrationState" // package private for testing private[controller] val MetricNames = Set( - ZkMigrationStateMetricName, ActiveControllerCountMetricName, OfflinePartitionsCountMetricName, PreferredReplicaImbalanceCountMetricName, @@ -174,7 +171,6 @@ class KafkaController(val config: KafkaConfig, /* single-thread scheduler to clean expired tokens */ private val tokenCleanScheduler = new KafkaScheduler(1, true, "delegation-token-cleaner") - metricsGroup.newGauge(ZkMigrationStateMetricName, 
() => ZkMigrationState.ZK.value().intValue()) metricsGroup.newGauge(ActiveControllerCountMetricName, () => if (isActive) 1 else 0) metricsGroup.newGauge(OfflinePartitionsCountMetricName, () => offlinePartitionCount) metricsGroup.newGauge(PreferredReplicaImbalanceCountMetricName, () => preferredReplicaImbalanceCount) diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala index f70819ef438ba..211be799a7e89 100644 --- a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala +++ b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala @@ -165,4 +165,25 @@ class CoordinatorPartitionWriter( // Required offset. partitionResult.lastOffset + 1 } + + override def deleteRecords(tp: TopicPartition, deleteBeforeOffset: Long): CompletableFuture[Void] = { + val responseFuture: CompletableFuture[Void] = new CompletableFuture[Void]() + + replicaManager.deleteRecords( + timeout = 30000L, // 30 seconds. + offsetPerPartition = Map(tp -> deleteBeforeOffset), + responseCallback = results => { + val result = results.get(tp) + if (result.isEmpty) { + responseFuture.completeExceptionally(new IllegalStateException(s"Delete status $result should have partition $tp.")) + } else if (result.get.errorCode != Errors.NONE.code) { + responseFuture.completeExceptionally(Errors.forCode(result.get.errorCode).exception) + } else { + responseFuture.complete(null) + } + }, + allowInternalTopicDeletion = true + ) + responseFuture + } } diff --git a/core/src/main/scala/kafka/raft/RaftManager.scala b/core/src/main/scala/kafka/raft/RaftManager.scala index 4fe5020a97445..79d2d2d4f2242 100644 --- a/core/src/main/scala/kafka/raft/RaftManager.scala +++ b/core/src/main/scala/kafka/raft/RaftManager.scala @@ -98,8 +98,6 @@ object KafkaRaftManager { // These constraints are enforced in KafkaServer, but repeating them here to guard against future callers if (config.processRoles.nonEmpty) { throw new RuntimeException("Not deleting metadata log dir since this node is in KRaft mode.") - } else if (!config.migrationEnabled) { - throw new RuntimeException("Not deleting metadata log dir since migrations are not enabled.") } else { val metadataDir = new File(config.metadataLogDir) val logDirName = UnifiedLog.logDirName(Topic.CLUSTER_METADATA_TOPIC_PARTITION) diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index ba5eef40e5cec..405caf240b848 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -3160,7 +3160,7 @@ class KafkaApis(val requestChannel: RequestChannel, new KafkaPrincipal(entry.principalType, entry.principalName)) // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { + if (!zkSupport.controller.isActive) { requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => CreateDelegationTokenResponse.prepareResponse(request.context.requestVersion, requestThrottleMs, Errors.NOT_CONTROLLER, owner, requester)) @@ -3204,7 +3204,7 @@ class KafkaApis(val requestChannel: RequestChannel, .setExpiryTimestampMs(expiryTimestamp))) } // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { + if (!zkSupport.controller.isActive) { 
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new RenewDelegationTokenResponse( new RenewDelegationTokenResponseData() @@ -3250,7 +3250,7 @@ class KafkaApis(val requestChannel: RequestChannel, .setExpiryTimestampMs(expiryTimestamp))) } // DelegationToken changes only need to be executed on the controller during migration - if (config.migrationEnabled && (!zkSupport.controller.isActive)) { + if (!zkSupport.controller.isActive) { requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new ExpireDelegationTokenResponse( new ExpireDelegationTokenResponseData() @@ -3617,12 +3617,16 @@ class KafkaApis(val requestChannel: RequestChannel, clusterId, () => { val brokers = new DescribeClusterResponseData.DescribeClusterBrokerCollection() - metadataCache.getAliveBrokerNodes(request.context.listenerName).foreach { node => + val describeClusterRequest = request.body[DescribeClusterRequest] + metadataCache.getBrokerNodes(request.context.listenerName).foreach { node => + if (!node.isFenced || describeClusterRequest.data().includeFencedBrokers()) { brokers.add(new DescribeClusterResponseData.DescribeClusterBroker(). setBrokerId(node.id). setHost(node.host). setPort(node.port). - setRack(node.rack)) + setRack(node.rack). + setIsFenced(node.isFenced)) + } } brokers }, diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 9502e81f3e49d..92c3b5249579a 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -337,9 +337,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def requiresZookeeper: Boolean = processRoles.isEmpty def usesSelfManagedQuorum: Boolean = processRoles.nonEmpty - val migrationEnabled: Boolean = getBoolean(KRaftConfigs.MIGRATION_ENABLED_CONFIG) - val migrationMetadataMinBatchSize: Int = getInt(KRaftConfigs.MIGRATION_METADATA_MIN_BATCH_SIZE_CONFIG) - private def parseProcessRoles(): Set[ProcessRole] = { val roles = getList(KRaftConfigs.PROCESS_ROLES_CONFIG).asScala.map { case "broker" => ProcessRole.BrokerRole @@ -804,9 +801,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) throw new ConfigException(s"Missing required configuration `${ZkConfigs.ZK_CONNECT_CONFIG}` which has no default value.") } if (brokerIdGenerationEnable) { - if (migrationEnabled) { - require(brokerId >= 0, "broker.id generation is incompatible with ZooKeeper migration. Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).") - } require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be greater than or equal to -1 and not greater than reserved.broker.max.id") } else { require(brokerId >= 0, "broker.id must be greater than or equal to 0") @@ -817,11 +811,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) throw new ConfigException(s"Missing configuration `${KRaftConfigs.NODE_ID_CONFIG}` which is required " + s"when `process.roles` is defined (i.e. 
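
Note on the KafkaApis handleDescribeClusterRequest hunk above: the broker list now comes from getBrokerNodes, fenced brokers are returned only when the request sets includeFencedBrokers, and each entry carries its fenced flag. A small sketch of that filter using the generated message classes (setter names follow Kafka's message-generator conventions and are taken from the hunk; everything else is illustrative):

import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBroker;
import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection;

public class DescribeClusterFencedSketch {
    // A fenced broker is listed only when the request opted in, and the fenced flag
    // is echoed back on every entry that is returned.
    static void maybeAdd(DescribeClusterBrokerCollection brokers,
                         int id, String host, int port, String rack,
                         boolean isFenced, boolean includeFencedBrokers) {
        if (!isFenced || includeFencedBrokers) {
            brokers.add(new DescribeClusterBroker()
                .setBrokerId(id)
                .setHost(host)
                .setPort(port)
                .setRack(rack)
                .setIsFenced(isFenced));
        }
    }
}
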
when running in KRaft mode).") } - if (migrationEnabled) { - if (zkConnect == null) { - throw new ConfigException(s"If using `${KRaftConfigs.MIGRATION_ENABLED_CONFIG}` in KRaft mode, `${ZkConfigs.ZK_CONNECT_CONFIG}` must also be set.") - } - } } require(logRollTimeMillis >= 1, "log.roll.ms must be greater than or equal to 1") require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be greater than or equal to 0") @@ -846,15 +835,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) ) } } - def validateQuorumVotersAndQuorumBootstrapServerForMigration(): Unit = { - if (voterIds.isEmpty && quorumConfig.bootstrapServers.isEmpty) { - throw new ConfigException( - s"""If using ${KRaftConfigs.MIGRATION_ENABLED_CONFIG}, either ${QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG} must - |contain the set of bootstrap controllers or ${QuorumConfig.QUORUM_VOTERS_CONFIG} must contain a parseable - |set of controllers.""".stripMargin.replace("\n", " ") - ) - } - } + def validateControlPlaneListenerEmptyForKRaft(): Unit = { require(controlPlaneListenerName.isEmpty, s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} is not supported in KRaft mode.") @@ -922,25 +903,9 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) validateAdvertisedControllerListenersNonEmptyForKRaftController() validateControllerListenerNamesMustAppearInListenersForKRaftController() } else { - // ZK-based - if (migrationEnabled) { - require(brokerId >= 0, - "broker.id generation is incompatible with ZooKeeper migration. Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).") - validateQuorumVotersAndQuorumBootstrapServerForMigration() - require(controllerListenerNames.nonEmpty, - s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must not be empty when running in ZooKeeper migration mode: ${controllerListenerNames.asJava}") - require(interBrokerProtocolVersion.isMigrationSupported, s"Cannot enable ZooKeeper migration without setting " + - s"'${ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG}' to 3.4 or higher") - if (logDirs.size > 1) { - require(interBrokerProtocolVersion.isDirectoryAssignmentSupported, - s"Cannot enable ZooKeeper migration with multiple log directories (aka JBOD) without setting " + - s"'${ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG}' to ${MetadataVersion.IBP_3_7_IV2} or higher") - } - } else { - // controller listener names must be empty when not in KRaft mode - require(controllerListenerNames.isEmpty, - s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must be empty when not running in KRaft mode: ${controllerListenerNames.asJava}") - } + // controller listener names must be empty when not in KRaft mode + require(controllerListenerNames.isEmpty, + s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must be empty when not running in KRaft mode: ${controllerListenerNames.asJava}") } val listenerNames = listeners.map(_.listenerName).toSet diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 0cb6ee48726d8..afd8429e57d18 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -26,15 +26,13 @@ import kafka.log.LogManager import kafka.log.remote.RemoteLogManager import kafka.metrics.KafkaMetricsReporter import kafka.network.{ControlPlaneAcceptor, DataPlaneAcceptor, RequestChannel, SocketServer} -import kafka.raft.KafkaRaftManager -import 
kafka.server.metadata.{OffsetTrackingListener, ZkConfigRepository, ZkMetadataCache} +import kafka.server.metadata.{ZkConfigRepository, ZkMetadataCache} import kafka.utils._ import kafka.zk.{AdminZkClient, BrokerInfo, KafkaZkClient} import org.apache.kafka.clients.{ApiVersions, ManualMetadataUpdater, MetadataRecoveryStrategy, NetworkClient, NetworkClientUtils} import org.apache.kafka.common.config.ConfigException import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.ApiMessageType.ListenerType -import org.apache.kafka.common.message.BrokerRegistrationRequestData.{Listener, ListenerCollection} import org.apache.kafka.common.message.ControlledShutdownRequestData import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network._ @@ -48,19 +46,15 @@ import org.apache.kafka.common.{Endpoint, Node, TopicPartition} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.image.loader.metrics.MetadataLoaderMetrics -import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble.VerificationFlag import org.apache.kafka.metadata.properties.MetaPropertiesEnsemble.VerificationFlag.REQUIRE_V0 import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble} -import org.apache.kafka.metadata.{BrokerState, MetadataRecordSerde, VersionRange} -import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.raft.Endpoints +import org.apache.kafka.metadata.BrokerState import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.MetadataVersion._ -import org.apache.kafka.server.common.{ApiMessageAndVersion, MetadataVersion, NodeToControllerChannelManager} +import org.apache.kafka.server.common.NodeToControllerChannelManager import org.apache.kafka.server.config.{ConfigType, ZkConfigs} -import org.apache.kafka.server.fault.LoggingFaultHandler import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.util.KafkaScheduler @@ -75,7 +69,7 @@ import java.time.Duration import java.util import java.util.concurrent._ import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import java.util.{Optional, OptionalInt, OptionalLong} +import java.util.{Optional, OptionalInt} import scala.collection.{Map, Seq} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOption @@ -114,8 +108,7 @@ object KafkaServer { class KafkaServer( val config: KafkaConfig, time: Time = Time.SYSTEM, - threadNamePrefix: Option[String] = None, - enableForwarding: Boolean = false + threadNamePrefix: Option[String] = None ) extends KafkaBroker with Server { private val startupComplete = new AtomicBoolean(false) @@ -205,7 +198,6 @@ class KafkaServer( @volatile def kafkaController: KafkaController = _kafkaController var lifecycleManager: BrokerLifecycleManager = _ - private var raftManager: KafkaRaftManager[ApiMessageAndVersion] = _ @volatile var brokerEpochManager: ZkBrokerEpochManager = _ @@ -241,9 +233,6 @@ class KafkaServer( val initialMetaPropsEnsemble = { val loader = new MetaPropertiesEnsemble.Loader() loader.addLogDirs(config.logDirs.asJava) - if (config.migrationEnabled) { - loader.addMetadataLogDir(config.metadataLogDir) - } loader.load() } @@ -252,11 +241,7 @@ class KafkaServer( } else { 
OptionalInt.of(config.brokerId) } - val verificationFlags = if (config.migrationEnabled) { - util.EnumSet.noneOf(classOf[VerificationFlag]) - } else { - util.EnumSet.of(REQUIRE_V0) - } + val verificationFlags = util.EnumSet.of(REQUIRE_V0) initialMetaPropsEnsemble.verify(Optional.of(_clusterId), verificationId, verificationFlags) /* generate brokerId */ @@ -294,11 +279,6 @@ class KafkaServer( val builder = new MetaProperties.Builder(e.getValue). setClusterId(_clusterId). setNodeId(config.brokerId) - if (!builder.directoryId().isPresent) { - if (config.migrationEnabled) { - builder.setDirectoryId(copier.generateValidDirectoryId()) - } - } copier.setLogDirProps(logDir, builder.build()) }) copier.emptyLogDirs().clear() @@ -332,8 +312,7 @@ class KafkaServer( metadataCache = MetadataCache.zkMetadataCache( config.brokerId, config.interBrokerProtocolVersion, - brokerFeatures, - config.migrationEnabled) + brokerFeatures) val controllerNodeProvider = new MetadataCacheControllerNodeProvider(metadataCache, config, () => Option(quorumControllerNodeProvider).map(_.getControllerInfo())) @@ -360,11 +339,7 @@ class KafkaServer( clientToControllerChannelManager.start() /* start forwarding manager */ - var autoTopicCreationChannel = Option.empty[NodeToControllerChannelManager] - if (enableForwarding) { - this.forwardingManager = Some(ForwardingManager(clientToControllerChannelManager, metrics)) - autoTopicCreationChannel = Some(clientToControllerChannelManager) - } + val autoTopicCreationChannel = Option.empty[NodeToControllerChannelManager] val apiVersionManager = ApiVersionManager( ListenerType.ZK_BROKER, @@ -415,81 +390,6 @@ class KafkaServer( _kafkaController = new KafkaController(config, zkClient, time, metrics, brokerInfo, brokerEpoch, tokenManager, brokerFeatures, metadataCache, threadNamePrefix) kafkaController.startup() - if (config.migrationEnabled) { - logger.info("Starting up additional components for ZooKeeper migration") - lifecycleManager = new BrokerLifecycleManager(config, - time, - s"zk-broker-${config.nodeId}-", - isZkBroker = true, - logManager.directoryIdsSet) - - // For ZK brokers in migration mode, always delete the metadata partition on startup. - logger.info(s"Deleting local metadata log from ${config.metadataLogDir} since this is a ZK broker in migration mode.") - KafkaRaftManager.maybeDeleteMetadataLogDir(config) - logger.info("Successfully deleted local metadata log. It will be re-created.") - - // If the ZK broker is in migration mode, start up a RaftManager to learn about the new KRaft controller - val quorumVoters = QuorumConfig.parseVoterConnections(config.quorumConfig.voters) - raftManager = new KafkaRaftManager[ApiMessageAndVersion]( - metaPropsEnsemble.clusterId().get(), - config, - // metadata log dir and directory.id must exist because migration is enabled - metaPropsEnsemble.logDirProps.get(metaPropsEnsemble.metadataLogDir.get).directoryId.get, - new MetadataRecordSerde, - KafkaRaftServer.MetadataPartition, - KafkaRaftServer.MetadataTopicId, - time, - metrics, - threadNamePrefix, - CompletableFuture.completedFuture(quorumVoters), - QuorumConfig.parseBootstrapServers(config.quorumConfig.bootstrapServers), - // Endpoint information is only needed for KRaft controllers (voters). 
ZK brokers - // (observers) can never be KRaft controllers - Endpoints.empty(), - fatalFaultHandler = new LoggingFaultHandler("raftManager", () => shutdown()) - ) - quorumControllerNodeProvider = RaftControllerNodeProvider(raftManager, config) - val brokerToQuorumChannelManager = new NodeToControllerChannelManagerImpl( - controllerNodeProvider = quorumControllerNodeProvider, - time = time, - metrics = metrics, - config = config, - channelName = "quorum", - s"zk-broker-${config.nodeId}-", - retryTimeoutMs = config.requestTimeoutMs.longValue - ) - - val listener = new OffsetTrackingListener() - raftManager.register(listener) - raftManager.startup() - - val networkListeners = new ListenerCollection() - config.effectiveAdvertisedBrokerListeners.foreach { ep => - networkListeners.add(new Listener(). - setHost(if (Utils.isBlank(ep.host)) InetAddress.getLocalHost.getCanonicalHostName else ep.host). - setName(ep.listenerName.value()). - setPort(if (ep.port == 0) socketServer.boundPort(ep.listenerName) else ep.port). - setSecurityProtocol(ep.securityProtocol.id)) - } - - val features = BrokerFeatures.createDefaultFeatureMap(BrokerFeatures.createDefault(config.unstableFeatureVersionsEnabled)).asScala - - // Even though ZK brokers don't use "metadata.version" feature, we need to overwrite it with our IBP as part of registration - // so the KRaft controller can verify that all brokers are on the same IBP before starting the migration. - val featuresRemapped = features + (MetadataVersion.FEATURE_NAME -> - VersionRange.of(config.interBrokerProtocolVersion.featureLevel(), config.interBrokerProtocolVersion.featureLevel())) - - lifecycleManager.start( - () => listener.highestOffset, - brokerToQuorumChannelManager, - clusterId, - networkListeners, - featuresRemapped.asJava, - OptionalLong.empty() - ) - logger.debug("Start RaftManager") - } - // Used by ZK brokers during a KRaft migration. When talking to a KRaft controller, we need to use the epoch // from BrokerLifecycleManager rather than ZK (via KafkaController) brokerEpochManager = new ZkBrokerEpochManager(metadataCache, kafkaController, Option(lifecycleManager)) @@ -630,18 +530,6 @@ class KafkaServer( dynamicConfigManager = new ZkConfigManager(zkClient, dynamicConfigHandlers) dynamicConfigManager.startup() - if (config.migrationEnabled && lifecycleManager != null) { - lifecycleManager.initialCatchUpFuture.whenComplete { case (_, t) => - if (t != null) { - fatal("Encountered an exception when waiting to catch up with KRaft metadata log", t) - shutdown() - } else { - info("Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker") - lifecycleManager.setReadyToUnfence() - } - } - } - val enableRequestProcessingFuture = socketServer.enableRequestProcessing(authorizerFutures) // Block here until all the authorizer futures are complete try { @@ -946,21 +834,6 @@ class KafkaServer( _brokerState = BrokerState.PENDING_CONTROLLED_SHUTDOWN - if (config.migrationEnabled && lifecycleManager != null && metadataCache.getControllerId.exists(_.isInstanceOf[KRaftCachedControllerId])) { - // For now we'll send the heartbeat with WantShutDown set so the KRaft controller can see a broker - // shutting down without waiting for the heartbeat to time out. 
- info("Notifying KRaft of controlled shutdown") - lifecycleManager.beginControlledShutdown() - try { - lifecycleManager.controlledShutdownFuture.get(5L, TimeUnit.MINUTES) - } catch { - case _: TimeoutException => - error("Timed out waiting for the controller to approve controlled shutdown") - case e: Throwable => - error("Got unexpected exception waiting for controlled shutdown future", e) - } - } - val shutdownSucceeded = doControlledShutdown(config.controlledShutdownMaxRetries.intValue) if (!shutdownSucceeded) @@ -1070,9 +943,6 @@ class KafkaServer( // Clear all reconfigurable instances stored in DynamicBrokerConfig config.dynamicConfig.clear() - if (raftManager != null) - CoreUtils.swallow(raftManager.shutdown(), this) - if (lifecycleManager != null) { lifecycleManager.close() } diff --git a/core/src/main/scala/kafka/server/MetadataCache.scala b/core/src/main/scala/kafka/server/MetadataCache.scala index 4b14f04483eaf..562c9d0ce4ae7 100755 --- a/core/src/main/scala/kafka/server/MetadataCache.scala +++ b/core/src/main/scala/kafka/server/MetadataCache.scala @@ -75,6 +75,8 @@ trait MetadataCache { def getAliveBrokerNodes(listenerName: ListenerName): Iterable[Node] + def getBrokerNodes(listenerName: ListenerName): Iterable[Node] + def getPartitionInfo(topic: String, partitionId: Int): Option[UpdateMetadataRequestData.UpdateMetadataPartitionState] /** @@ -117,10 +119,9 @@ trait MetadataCache { object MetadataCache { def zkMetadataCache(brokerId: Int, metadataVersion: MetadataVersion, - brokerFeatures: BrokerFeatures = BrokerFeatures.createEmpty(), - zkMigrationEnabled: Boolean = false) + brokerFeatures: BrokerFeatures = BrokerFeatures.createEmpty()) : ZkMetadataCache = { - new ZkMetadataCache(brokerId, metadataVersion, brokerFeatures, zkMigrationEnabled) + new ZkMetadataCache(brokerId, metadataVersion, brokerFeatures) } def kRaftMetadataCache( diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index 67cc1a84c56aa..292b0fd70a036 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -1172,11 +1172,11 @@ class ReplicaManager(val config: KafkaConfig, * Delete records on leader replicas of the partition, and wait for delete records operation be propagated to other replicas; * the callback function will be triggered either when timeout or logStartOffset of all live replicas have reached the specified offset */ - private def deleteRecordsOnLocalLog(offsetPerPartition: Map[TopicPartition, Long]): Map[TopicPartition, LogDeleteRecordsResult] = { + private def deleteRecordsOnLocalLog(offsetPerPartition: Map[TopicPartition, Long], allowInternalTopicDeletion: Boolean): Map[TopicPartition, LogDeleteRecordsResult] = { trace("Delete records on local logs to offsets [%s]".format(offsetPerPartition)) offsetPerPartition.map { case (topicPartition, requestedOffset) => - // reject delete records operation on internal topics - if (Topic.isInternal(topicPartition.topic)) { + // reject delete records operation for internal topics unless allowInternalTopicDeletion is true + if (Topic.isInternal(topicPartition.topic) && !allowInternalTopicDeletion) { (topicPartition, LogDeleteRecordsResult(-1L, -1L, Some(new InvalidTopicException(s"Cannot delete records of internal topic ${topicPartition.topic}")))) } else { try { @@ -1369,9 +1369,10 @@ class ReplicaManager(val config: KafkaConfig, def deleteRecords(timeout: Long, offsetPerPartition: Map[TopicPartition, Long], - 
responseCallback: Map[TopicPartition, DeleteRecordsPartitionResult] => Unit): Unit = { + responseCallback: Map[TopicPartition, DeleteRecordsPartitionResult] => Unit, + allowInternalTopicDeletion: Boolean = false): Unit = { val timeBeforeLocalDeleteRecords = time.milliseconds - val localDeleteRecordsResults = deleteRecordsOnLocalLog(offsetPerPartition) + val localDeleteRecordsResults = deleteRecordsOnLocalLog(offsetPerPartition, allowInternalTopicDeletion) debug("Delete records on local log in %d ms".format(time.milliseconds - timeBeforeLocalDeleteRecords)) val deleteRecordsStatus = localDeleteRecordsResults.map { case (topicPartition, result) => @@ -2092,24 +2093,6 @@ class ReplicaManager(val config: KafkaConfig, s"Latest known controller epoch is $controllerEpoch") leaderAndIsrRequest.getErrorResponse(0, Errors.STALE_CONTROLLER_EPOCH.exception) } else { - // In migration mode, reconcile missed topic deletions when handling full LISR from KRaft controller. - // LISR "type" field was previously unspecified (0), so if we see it set to Full (2), then we know the - // request came from a KRaft controller. - // - // Note that we have to do this first, before anything else, since topics may be recreated with the same - // name, but a different ID. And in that case, we need to move aside the old version of those topics - // (with the obsolete topic ID) before doing anything else. - if (config.migrationEnabled && - leaderAndIsrRequest.isKRaftController && - leaderAndIsrRequest.requestType() == AbstractControlRequest.Type.FULL) - { - val strays = LogManager.findStrayReplicas(localBrokerId, leaderAndIsrRequest, logManager.allLogs) - stateChangeLogger.info(s"While handling full LeaderAndIsr request from KRaft " + - s"controller $controllerId with correlation id $correlationId, found ${strays.size} " + - "stray partition(s).") - updateStrayLogs(strays) - } - val responseMap = new mutable.HashMap[TopicPartition, Errors] controllerEpoch = leaderAndIsrRequest.controllerEpoch @@ -2670,16 +2653,12 @@ class ReplicaManager(val config: KafkaConfig, s"for partitions ${partitionsWithOfflineFutureReplica.mkString(",")} because they are in the failed log directory $dir.") } logManager.handleLogDirFailure(dir) - if (dir == new File(config.metadataLogDir).getAbsolutePath && (config.processRoles.nonEmpty || config.migrationEnabled)) { + if (dir == new File(config.metadataLogDir).getAbsolutePath && config.processRoles.nonEmpty) { fatal(s"Shutdown broker because the metadata log dir $dir has failed") Exit.halt(1) } if (notifyController) { - if (config.migrationEnabled) { - fatal(s"Shutdown broker because some log directory has failed during migration mode: $dir") - Exit.halt(1) - } if (zkClient.isEmpty) { if (uuid.isDefined) { directoryEventHandler.handleFailure(uuid.get) diff --git a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala index 5fad48f8a71e5..5d7484714d019 100644 --- a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala +++ b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala @@ -381,6 +381,10 @@ class KRaftMetadataCache( flatMap(_.node(listenerName.value()).toScala).toSeq } + override def getBrokerNodes(listenerName: ListenerName): Seq[Node] = { + _currentImage.cluster().brokers().values().asScala.flatMap(_.node(listenerName.value()).asScala).toSeq + } + // Does NOT include offline replica metadata override def getPartitionInfo(topicName: String, partitionId: Int): 
Option[UpdateMetadataPartitionState] = { Option(_currentImage.topics().getTopic(topicName)). diff --git a/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala index 3205a24aa4447..d7f1d86846657 100755 --- a/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala +++ b/core/src/main/scala/kafka/server/metadata/ZkMetadataCache.scala @@ -157,8 +157,7 @@ object ZkMetadataCache { class ZkMetadataCache( brokerId: Int, metadataVersion: MetadataVersion, - brokerFeatures: BrokerFeatures, - zkMigrationEnabled: Boolean = false) + brokerFeatures: BrokerFeatures) extends MetadataCache with ZkFinalizedFeatureCache with Logging { private val partitionMetadataLock = new ReentrantReadWriteLock() @@ -354,6 +353,10 @@ class ZkMetadataCache( metadataSnapshot.aliveBrokers.values.flatMap(_.getNode(listenerName)) } + override def getBrokerNodes(listenerName: ListenerName): Iterable[Node] = { + getAliveBrokerNodes(listenerName) + } + def getTopicId(topicName: String): Uuid = { metadataSnapshot.topicIds.getOrElse(topicName, Uuid.ZERO_UUID) } @@ -476,9 +479,6 @@ class ZkMetadataCache( stateChangeLogger.error(s"Received UpdateMetadataRequest with Type=FULL (2), but version of " + updateMetadataRequest.version() + ", which should not be possible. Not treating this as a full " + "metadata update") - } else if (!zkMigrationEnabled) { - stateChangeLogger.error(s"Received UpdateMetadataRequest with Type=FULL (2), but ZK migrations " + - s"are not enabled on this broker. Not treating this as a full metadata update") } else { // When handling a UMR from a KRaft controller, we may have to insert some partition // deletions at the beginning, to handle the different way topic deletion works in KRaft diff --git a/core/src/main/scala/kafka/utils/ToolsUtils.scala b/core/src/main/scala/kafka/utils/ToolsUtils.scala deleted file mode 100644 index 7831ee64d1e7d..0000000000000 --- a/core/src/main/scala/kafka/utils/ToolsUtils.scala +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -package kafka.utils - -import joptsimple.OptionParser -import org.apache.kafka.server.util.CommandLineUtils - -object ToolsUtils { - /** - * This is a simple wrapper around `CommandLineUtils.printUsageAndExit`. - * It is needed for tools migration (KAFKA-14525), as there is no Java equivalent for return type `Nothing`. - * Can be removed once ZooKeeper related code are deleted. - * - * @param parser Command line options parser. - * @param message Error message. 
- */ - def printUsageAndExit(parser: OptionParser, message: String): Nothing = { - CommandLineUtils.printUsageAndExit(parser, message) - throw new AssertionError("printUsageAndExit should not return, but it did.") - } -} diff --git a/core/src/main/scala/kafka/zk/KafkaZkClient.scala b/core/src/main/scala/kafka/zk/KafkaZkClient.scala index 106d5075dc400..3c9740fb5ce2f 100644 --- a/core/src/main/scala/kafka/zk/KafkaZkClient.scala +++ b/core/src/main/scala/kafka/zk/KafkaZkClient.scala @@ -30,13 +30,12 @@ import org.apache.kafka.common.security.token.delegation.{DelegationToken, Token import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderAndIsr -import org.apache.kafka.metadata.migration.ZkMigrationLeadershipState import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.config.{ConfigType, ZkConfigs} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.storage.internals.log.LogConfig import org.apache.zookeeper.KeeperException.{Code, NodeExistsException} -import org.apache.zookeeper.OpResult.{CheckResult, CreateResult, ErrorResult, SetDataResult} +import org.apache.zookeeper.OpResult.{CreateResult, ErrorResult, SetDataResult} import org.apache.zookeeper.client.ZKClientConfig import org.apache.zookeeper.common.ZKConfig import org.apache.zookeeper.data.{ACL, Stat} @@ -165,92 +164,6 @@ class KafkaZkClient private[zk] ( tryCreateControllerZNodeAndIncrementEpoch() } - /** - * Registers a given KRaft controller in zookeeper as the active controller. Unlike the ZK equivalent of this method, - * this creates /controller as a persistent znode. This prevents ZK brokers from attempting to claim the controller - * leadership during a KRaft leadership failover. - * - * This method is called at the beginning of a KRaft migration and during subsequent KRaft leadership changes during - * the migration. - * - * To ensure that the KRaft controller epoch exceeds the current ZK controller epoch, this registration algorithm - * uses a conditional update on the /controller and /controller_epoch znodes. - * - * If a new controller is registered concurrently with this registration, one of the two will fail the CAS - * operation on /controller_epoch. For KRaft, we have an extra guard against the registered KRaft epoch going - * backwards. If a KRaft controller had previously registered, an additional CAS operation is done on the /controller - * ZNode to ensure that the KRaft epoch being registered is newer. - * - * @param kraftControllerId ID of the KRaft controller node - * @param kraftControllerEpoch Epoch of the KRaft controller node - * @return A result object containing the written ZK controller epoch and version, or nothing. - */ - def tryRegisterKRaftControllerAsActiveController(kraftControllerId: Int, kraftControllerEpoch: Int): KRaftRegistrationResult = { - val timestamp = time.milliseconds() - val curEpochOpt: Option[(Int, Int)] = getControllerEpoch.map(e => (e._1, e._2.getVersion)) - val controllerOpt = getControllerRegistration - - // If we have a KRaft epoch registered in /controller, and it is not _older_ than the requested epoch, throw an error. 
- controllerOpt.flatMap(_.kraftEpoch).foreach { kraftEpochInZk => - if (kraftEpochInZk >= kraftControllerEpoch) { - throw new ControllerMovedException(s"Cannot register KRaft controller $kraftControllerId with epoch $kraftControllerEpoch " + - s"as the current controller register in ZK has the same or newer epoch $kraftEpochInZk.") - } - } - - curEpochOpt match { - case None => - throw new IllegalStateException(s"Cannot register KRaft controller $kraftControllerId as the active controller " + - s"since there is no ZK controller epoch present.") - case Some((curEpoch: Int, curEpochZk: Int)) => - val newControllerEpoch = curEpoch + 1 - - val response = controllerOpt match { - case Some(controller) => - info(s"KRaft controller $kraftControllerId overwriting ${ControllerZNode.path} to become the active " + - s"controller with ZK epoch $newControllerEpoch. The previous controller was ${controller.broker}.") - retryRequestUntilConnected( - MultiRequest(Seq( - SetDataOp(ControllerEpochZNode.path, ControllerEpochZNode.encode(newControllerEpoch), curEpochZk), - DeleteOp(ControllerZNode.path, controller.zkVersion), - CreateOp(ControllerZNode.path, ControllerZNode.encode(kraftControllerId, timestamp, kraftControllerEpoch), - defaultAcls(ControllerZNode.path), CreateMode.PERSISTENT))) - ) - case None => - info(s"KRaft controller $kraftControllerId creating ${ControllerZNode.path} to become the active " + - s"controller with ZK epoch $newControllerEpoch. There was no active controller.") - retryRequestUntilConnected( - MultiRequest(Seq( - SetDataOp(ControllerEpochZNode.path, ControllerEpochZNode.encode(newControllerEpoch), curEpochZk), - CreateOp(ControllerZNode.path, ControllerZNode.encode(kraftControllerId, timestamp, kraftControllerEpoch), - defaultAcls(ControllerZNode.path), CreateMode.PERSISTENT))) - ) - } - - val failureSuffix = s"while trying to register KRaft controller $kraftControllerId with ZK epoch " + - s"$newControllerEpoch. KRaft controller was not registered." 
- response.resultCode match { - case Code.OK => - info(s"Successfully registered KRaft controller $kraftControllerId with ZK epoch $newControllerEpoch") - // First op is always SetData on /controller_epoch - val setDataResult = response.zkOpResults.head.rawOpResult.asInstanceOf[SetDataResult] - SuccessfulRegistrationResult(newControllerEpoch, setDataResult.getStat.getVersion) - case Code.BADVERSION => - info(s"The ZK controller epoch changed $failureSuffix") - FailedRegistrationResult() - case Code.NONODE => - info(s"The ephemeral node at ${ControllerZNode.path} went away $failureSuffix") - FailedRegistrationResult() - case Code.NODEEXISTS => - info(s"The ephemeral node at ${ControllerZNode.path} was created by another controller $failureSuffix") - FailedRegistrationResult() - case code => - error(s"ZooKeeper had an error $failureSuffix") - throw KeeperException.create(code) - } - } - } - private def maybeCreateControllerEpochZNode(): (Int, Int) = { createControllerEpochRaw(KafkaController.InitialControllerEpoch).resultCode match { case Code.OK => @@ -1723,36 +1636,6 @@ class KafkaZkClient private[zk] ( } } - def getOrCreateMigrationState(initialState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val getDataRequest = GetDataRequest(MigrationZNode.path) - val getDataResponse = retryRequestUntilConnected(getDataRequest) - getDataResponse.resultCode match { - case Code.OK => - MigrationZNode.decode(getDataResponse.data, getDataResponse.stat.getVersion, getDataResponse.stat.getMtime) - case Code.NONODE => - createInitialMigrationState(initialState) - case _ => throw getDataResponse.resultException.get - } - } - - private def createInitialMigrationState(initialState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val createRequest = CreateRequest( - MigrationZNode.path, - MigrationZNode.encode(initialState), - defaultAcls(MigrationZNode.path), - CreateMode.PERSISTENT) - val response = retryRequestUntilConnected(createRequest) - response.maybeThrow() - initialState.withMigrationZkVersion(0) - } - - def updateMigrationState(migrationState: ZkMigrationLeadershipState): ZkMigrationLeadershipState = { - val req = SetDataRequest(MigrationZNode.path, MigrationZNode.encode(migrationState), migrationState.migrationZkVersion()) - val resp = retryRequestUntilConnected(req) - resp.maybeThrow() - migrationState.withMigrationZkVersion(resp.stat.getVersion) - } - /** * Return the ACLs of the node of the given path * @param path the given path for the node @@ -1971,137 +1854,6 @@ class KafkaZkClient private[zk] ( } } - /** - * Safely performs a sequence of writes to ZooKeeper as part of a KRaft migration. For each request in {@code requests}, we - * wrap the operation in a multi-op transaction that includes a check op on /controller_epoch and /migration. This ensures - * that another KRaft controller or another ZK controller has unexpectedly taken leadership. - * - * In cases of KRaft failover during a migration, it is possible that a write is attempted before the old KRaft controller - * receives the new leader information. In this case, the check op on /migration acts as a guard against multiple writers. - * - * The multi-op for the last request in {@code requests} is used to update the /migration node with the latest migration - * state. This effectively checkpoints the progress of the migration in ZK relative to the metadata log. - * - * Each multi-op request is atomic. The overall sequence of multi-op requests is not atomic and we may fail during any - * of them. 
When the KRaft controller recovers the migration state, it will re-apply all of the writes needed to update - * the ZK state with the latest KRaft state. In the case of Create or Delete operations, these will fail if applied - * twice, so we need to ignore NodeExists and NoNode failures for those cases. - * - * @param requests A sequence of ZK requests. Only Create, Delete, and SetData are supported. - * @param migrationState The current migration state. This is written out as part of the final multi-op request. - * @return The new version of /migration ZNode and the sequence of responses for the given requests. - */ - def retryMigrationRequestsUntilConnected[Req <: AsyncRequest](requests: Seq[Req], - migrationState: ZkMigrationLeadershipState): (Int, Seq[Req#Response]) = { - - if (requests.isEmpty) { - return (migrationState.migrationZkVersion(), Seq.empty) - } - - def wrapMigrationRequest(request: Req, lastRequestInBatch: Boolean): MultiRequest = { - // Wrap a single request with the multi-op transactional request. - val checkOp = CheckOp(ControllerEpochZNode.path, migrationState.zkControllerEpochZkVersion()) - val migrationOp = if (lastRequestInBatch) { - SetDataOp(MigrationZNode.path, MigrationZNode.encode(migrationState), migrationState.migrationZkVersion()) - } else { - CheckOp(MigrationZNode.path, migrationState.migrationZkVersion()) - } - - request match { - case CreateRequest(path, data, acl, createMode, ctx) => - MultiRequest(Seq(checkOp, migrationOp, CreateOp(path, data, acl, createMode)), ctx) - case DeleteRequest(path, version, ctx) => - MultiRequest(Seq(checkOp, migrationOp, DeleteOp(path, version)), ctx) - case SetDataRequest(path, data, version, ctx) => - MultiRequest(Seq(checkOp, migrationOp, SetDataOp(path, data, version)), ctx) - case _ => throw new IllegalStateException(s"$request does not need controller epoch check") - } - } - - def handleUnwrappedMigrationResult(migrationOp: ZkOp, migrationResult: OpResult): Int = { - // Handle just the operation that updated /migration ZNode - val (path: String, data: Option[Array[Byte]], version: Int) = migrationOp match { - case CheckOp(path, version) => (path, None, version) - case SetDataOp(path, data, version) => (path, Some(data), version) - case _ => throw new IllegalStateException("Unexpected result on /migration znode") - } - - migrationResult match { - case _: CheckResult => version - case setDataResult: SetDataResult => setDataResult.getStat.getVersion - case errorResult: ErrorResult => - if (path.equals(MigrationZNode.path)) { - val errorCode = Code.get(errorResult.getErr) - if (errorCode == Code.BADVERSION) { - data match { - case Some(value) => - val failedPayload = MigrationZNode.decode(value, version, -1) - throw new RuntimeException( - s"Conditional update on KRaft Migration ZNode failed. Sent zkVersion = $version. The failed " + - s"write was: $failedPayload. This indicates that another KRaft controller is making writes to ZooKeeper.") - case None => - throw new RuntimeException(s"Check op on KRaft Migration ZNode failed. Sent zkVersion = $version. 
" + - s"This indicates that another KRaft controller is making writes to ZooKeeper.") - } - } else if (errorCode == Code.OK) { - // This means the Check or SetData op would have been ok, but failed because of another operation in this multi-op - version - } else { - throw KeeperException.create(errorCode, path) - } - } else { - throw new RuntimeException(s"Got migration result for incorrect path $path") - } - case _ => throw new RuntimeException( - s"Expected either CheckResult, SetDataResult, or ErrorResult for migration op, but saw $migrationResult") - } - } - - def unwrapMigrationResponse(response: AsyncResponse, lastRequestInBatch: Boolean): (AsyncResponse, Int) = { - response match { - case MultiResponse(resultCode, _, ctx, zkOpResults, responseMetadata) => - zkOpResults match { - case Seq(ZkOpResult(checkOp: CheckOp, checkOpResult), ZkOpResult(migrationOp: CheckOp, migrationResult), zkOpResult) => - // Matches all requests except or the last one (CheckOp on /migration) - if (lastRequestInBatch) { - throw new IllegalStateException("Should not see a Check operation on /migration in the last request.") - } - handleUnwrappedCheckOp(checkOp, checkOpResult) - val migrationVersion = handleUnwrappedMigrationResult(migrationOp, migrationResult) - (handleUnwrappedZkOp(zkOpResult, resultCode, ctx, responseMetadata), migrationVersion) - case Seq(ZkOpResult(checkOp: CheckOp, checkOpResult), ZkOpResult(migrationOp: SetDataOp, migrationResult), zkOpResult) => - // Matches the last request in a batch (SetDataOp on /migration) - if (!lastRequestInBatch) { - throw new IllegalStateException("Should only see a SetData operation on /migration in the last request.") - } - handleUnwrappedCheckOp(checkOp, checkOpResult) - val migrationVersion = handleUnwrappedMigrationResult(migrationOp, migrationResult) - (handleUnwrappedZkOp(zkOpResult, resultCode, ctx, responseMetadata), migrationVersion) - case null => throw KeeperException.create(resultCode) - case _ => throw new IllegalStateException( - s"Cannot unwrap $response because it does not contain the expected operations for a migration operation.") - } - case _ => throw new IllegalStateException(s"Cannot unwrap $response because it is not a MultiResponse") - } - } - - migrationState.zkControllerEpochZkVersion() match { - case ZkVersion.MatchAnyVersion => throw new IllegalArgumentException( - s"Expected a controller epoch zkVersion when making migration writes, not -1.") - case version if version >= 0 => - logger.trace(s"Performing ${requests.size} migration update(s) with migrationState=$migrationState") - val wrappedRequests = requests.map(req => wrapMigrationRequest(req, req == requests.last)) - val results = retryRequestsUntilConnected(wrappedRequests) - val unwrappedResults = results.map(resp => unwrapMigrationResponse(resp, resp == results.last)) - val migrationZkVersion = unwrappedResults.last._2 - // Return the new version of /migration and the sequence of responses to the original requests - (migrationZkVersion, unwrappedResults.map(_._1.asInstanceOf[Req#Response])) - case invalidVersion => - throw new IllegalArgumentException( - s"Expected controller epoch zkVersion $invalidVersion should be non-negative or equal to ${ZkVersion.MatchAnyVersion}") - } - } - private def retryRequestsUntilConnected[Req <: AsyncRequest](requests: Seq[Req]): Seq[Req#Response] = { val remainingRequests = new mutable.ArrayBuffer(requests.size) ++= requests val responses = new mutable.ArrayBuffer[Req#Response] diff --git a/core/src/main/scala/kafka/zk/ZkData.scala 
b/core/src/main/scala/kafka/zk/ZkData.scala index 7c1ec8ab56577..d4c92150909b6 100644 --- a/core/src/main/scala/kafka/zk/ZkData.scala +++ b/core/src/main/scala/kafka/zk/ZkData.scala @@ -37,7 +37,6 @@ import org.apache.kafka.common.security.token.delegation.TokenInformation import org.apache.kafka.common.utils.{SecurityUtils, Time} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationLeadershipState import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.common.{MetadataVersion, ProducerIdsBlock} @@ -1044,43 +1043,6 @@ object FeatureZNode { } } -object MigrationZNode { - val path = "/migration" - - def encode(migration: ZkMigrationLeadershipState): Array[Byte] = { - val jsonMap = Map( - "version" -> 0, - "kraft_controller_id" -> migration.kraftControllerId(), - "kraft_controller_epoch" -> migration.kraftControllerEpoch(), - "kraft_metadata_offset" -> migration.kraftMetadataOffset(), - "kraft_metadata_epoch" -> migration.kraftMetadataEpoch() - ) - Json.encodeAsBytes(jsonMap.asJava) - } - - def decode(bytes: Array[Byte], zkVersion: Int, modifyTimeMs: Long): ZkMigrationLeadershipState = { - val jsonDataAsString = bytes.map(_.toChar).mkString - Json.parseBytes(bytes).map(_.asJsonObject).flatMap { js => - val version = js("version").to[Int] - if (version != 0) { - throw new KafkaException(s"Encountered unknown version $version when parsing migration json $jsonDataAsString") - } - val controllerId = js("kraft_controller_id").to[Int] - val controllerEpoch = js("kraft_controller_epoch").to[Int] - val metadataOffset = js("kraft_metadata_offset").to[Long] - val metadataEpoch = js("kraft_metadata_epoch").to[Int] - Some(new ZkMigrationLeadershipState( - controllerId, - controllerEpoch, - metadataOffset, - metadataEpoch, - modifyTimeMs, - zkVersion, - ZkMigrationLeadershipState.EMPTY.zkControllerEpoch(), - ZkMigrationLeadershipState.EMPTY.zkControllerEpochZkVersion())) - }.getOrElse(throw new KafkaException(s"Failed to parse the migration json $jsonDataAsString")) - } -} object ZkData { @@ -1101,7 +1063,6 @@ object ZkData { LogDirEventNotificationZNode.path, DelegationTokenAuthZNode.path, ExtendedAclZNode.path, - MigrationZNode.path, FeatureZNode.path) ++ ZkAclStore.securePaths // These are persistent ZK paths that should exist on kafka broker startup. 
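For context on the CoordinatorPartitionWriter.deleteRecords addition earlier in this patch: it delegates to ReplicaManager.deleteRecords with allowInternalTopicDeletion = true, which lifts the usual "Cannot delete records of internal topic" rejection for the coordinator's own partitions. Below is a minimal caller-side sketch only; the helper name, the partition, and the offset are placeholders and not part of this patch, and the writer is assumed to have been constructed as new CoordinatorPartitionWriter(replicaManager) against a live ReplicaManager.

import java.util.concurrent.CompletableFuture
import org.apache.kafka.common.TopicPartition
import kafka.coordinator.group.CoordinatorPartitionWriter

// Prune a coordinator partition up to (but not including) the given offset.
// Failures complete the future exceptionally with the exception mapped from
// the DeleteRecords error code, as in the override added above.
def pruneBefore(writer: CoordinatorPartitionWriter, tp: TopicPartition, offset: Long): CompletableFuture[Void] = {
  writer.deleteRecords(tp, offset).whenComplete { (_, error) =>
    if (error != null) {
      System.err.println(s"Delete records failed for $tp: ${error.getMessage}")
    }
  }
}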
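Similarly, the DescribeCluster change in KafkaApis above, together with the new MetadataCache.getBrokerNodes, lets admin clients opt in to seeing fenced brokers. A hedged client-side sketch follows, assuming a KRaft cluster reachable at localhost:9092 (the bootstrap address is a placeholder); the behavior it exercises matches the integration tests further below.

import java.util.Properties
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, DescribeClusterOptions}
import scala.jdk.CollectionConverters._

object ListAllBrokers extends App {
  val props = new Properties()
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder address
  val admin = Admin.create(props)
  try {
    // Ask a broker endpoint for every broker, fenced or not; controller endpoints
    // reject includeFencedBrokers, as SslAdminIntegrationTest below verifies.
    val nodes = admin.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true))
      .nodes().get().asScala
    nodes.foreach(node => println(s"broker ${node.id} fenced=${node.isFenced}"))
  } finally {
    admin.close()
  }
}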
diff --git a/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala index d9cc326ff9456..64cc259408e13 100644 --- a/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala @@ -20,7 +20,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource class AdminClientRebootstrapTest extends RebootstrapTest { - @ParameterizedTest + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") @ValueSource(booleans = Array(false, true)) def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala index c7c87ce3b847a..5d6622799fe68 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala @@ -17,11 +17,12 @@ package kafka.api import kafka.api.ConsumerRebootstrapTest._ -import kafka.server.QuorumTestHarness.getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit +import kafka.server.QuorumTestHarness.getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows} +import org.junit.jupiter.api.Disabled import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} @@ -31,6 +32,7 @@ import java.util.concurrent.TimeUnit import java.util.concurrent.TimeoutException class ConsumerRebootstrapTest extends RebootstrapTest { + @Disabled("KAFKA-17986") @ParameterizedTest(name = RebootstrapTestName) @MethodSource(Array("rebootstrapTestParams")) def testRebootstrap(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { @@ -84,6 +86,7 @@ class ConsumerRebootstrapTest extends RebootstrapTest { consumeAndVerifyRecords(consumer, 10, 20, startingKeyAndValueIndex = 20, startingTimestamp = 20) } + @Disabled @ParameterizedTest(name = RebootstrapTestName) @MethodSource(Array("rebootstrapTestParams")) def testRebootstrapDisabled(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { @@ -133,8 +136,8 @@ object ConsumerRebootstrapTest { final val RebootstrapTestName = s"${TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames}.useRebootstrapTriggerMs={2}" def rebootstrapTestParams: stream.Stream[Arguments] = { - assertEquals(1, getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit.count()) - val args = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit + assertEquals(1, getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly.count()) + val args = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly .findFirst().get.get stream.Stream.of( Arguments.of((args :+ true):_*), diff --git a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala index bff719681a2b8..4141342c7a949 100644 --- a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala +++ 
b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala @@ -235,7 +235,7 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { createProducer(), createConsumer(), adminClient) } - case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaServer, + case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaBroker, producerClientId: String, consumerClientId: String, override val producer: KafkaProducer[Array[Byte], Array[Byte]], override val consumer: Consumer[Array[Byte], Array[Byte]], diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index fbdff9c4fc33e..890aff6da1edb 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -588,6 +588,30 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(brokerStrs.mkString(","), nodeStrs.mkString(",")) } + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesWithFencedBroker(quorum: String): Unit = { + client = createAdminClient + val fencedBrokerId = brokers.last.config.brokerId + killBroker(fencedBrokerId, JDuration.ofMillis(0)) + // It takes a few seconds for a broker to get fenced after being killed + // So we retry until only 2 of 3 brokers returned in the result or the max wait is reached + TestUtils.retry(20000) { + assertTrue(client.describeCluster().nodes().get().asScala.size.equals(brokers.size - 1)) + } + + // List nodes again but this time include the fenced broker + val nodes = client.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)).nodes().get().asScala + assertTrue(nodes.size.equals(brokers.size)) + nodes.foreach(node => { + if (node.id().equals(fencedBrokerId)) { + assertTrue(node.isFenced) + } else { + assertFalse(node.isFenced) + } + }) + } + @ParameterizedTest @ValueSource(strings = Array("kraft")) def testAdminClientHandlingBadIPWithoutTimeout(quorum: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala index d0eabf370cbbc..f32c4433b45bb 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala @@ -18,11 +18,13 @@ package kafka.api import org.apache.kafka.clients.producer.ProducerRecord import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Disabled import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource class ProducerRebootstrapTest extends RebootstrapTest { - @ParameterizedTest + @Disabled("KAFKA-17986") + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") @ValueSource(booleans = Array(false, true)) def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { server1.shutdown() diff --git a/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala index 45324a89c6ef1..5d3134a0870d4 100644 --- a/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala +++ b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala @@ -16,7 +16,7 @@ */ package kafka.api -import kafka.server.{KafkaConfig, 
KafkaServer} +import kafka.server.{KafkaBroker, KafkaConfig} import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.coordinator.group.GroupCoordinatorConfig @@ -26,8 +26,8 @@ import java.util.Properties abstract class RebootstrapTest extends AbstractConsumerTest { override def brokerCount: Int = 2 - def server0: KafkaServer = serverForId(0).get - def server1: KafkaServer = serverForId(1).get + def server0: KafkaBroker = serverForId(0).get + def server1: KafkaBroker = serverForId(1).get override def generateConfigs: Seq[KafkaConfig] = { val overridingProps = new Properties() @@ -36,7 +36,7 @@ abstract class RebootstrapTest extends AbstractConsumerTest { // In this test, fixed ports are necessary, because brokers must have the // same port after the restart. - FixedPortTestUtils.createBrokerConfigs(brokerCount, zkConnect, enableControlledShutdown = false) + FixedPortTestUtils.createBrokerConfigs(brokerCount, null, enableControlledShutdown = false) .map(KafkaConfig.fromProps(_, overridingProps)) } diff --git a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala index 5063e79ad08da..5fcc0449bb520 100644 --- a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala @@ -18,7 +18,7 @@ import java.util.Properties import com.yammer.metrics.core.Gauge import kafka.security.JaasTestUtils import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.{AdminClientConfig, CreateAclsResult} +import org.apache.kafka.clients.admin.{AdminClientConfig, CreateAclsResult, DescribeClusterOptions} import org.apache.kafka.common.acl._ import org.apache.kafka.common.config.SslConfigs import org.apache.kafka.common.config.internals.BrokerSecurityConfigs @@ -32,7 +32,7 @@ import org.apache.kafka.common.network.ConnectionMode import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.authorizer.{ClusterMetadataAuthorizer, StandardAuthorizer} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotNull, assertTrue} +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotNull, assertThrows, assertTrue} import org.junit.jupiter.api.{AfterEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -158,6 +158,25 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { super.tearDown() } + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllersIncludingFencedBrokers(quorum: String): Unit = { + useBoostrapControllers() + client = createAdminClient + val result = client.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)) + val exception = assertThrows(classOf[Exception], () => { result.nodes().get()}) + assertTrue(exception.getCause.getCause.getMessage.contains("Cannot request fenced brokers from controller endpoint")) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllers(quorum: String): Unit = { + useBoostrapControllers() + client = createAdminClient + val result = client.describeCluster(new DescribeClusterOptions()) + assertTrue(result.nodes().get().size().equals(controllerServers.size)) + } + @ParameterizedTest @ValueSource(strings = Array("kraft")) def 
testAclUpdatesUsingSynchronousAuthorizer(quorum: String): Unit = { diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index ac59f026b0c2f..ce953990af892 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -82,7 +82,7 @@ class ZooKeeperQuorumImplementation( startup: Boolean, threadNamePrefix: Option[String], ): KafkaBroker = { - val server = new KafkaServer(config, time, threadNamePrefix, false) + val server = new KafkaServer(config, time, threadNamePrefix) if (startup) server.startup() server } diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala index f12d21019a71e..9b192e851e992 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala @@ -20,13 +20,14 @@ import kafka.server.ReplicaManager import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.coordinator.common.runtime.PartitionWriter import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import org.apache.kafka.test.TestUtils.assertFutureThrows -import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows} +import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows, assertTrue} import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.EnumSource @@ -238,4 +239,83 @@ class CoordinatorPartitionWriterTest { batch )) } + + @Test + def testDeleteRecordsResponseContainsError(): Unit = { + val replicaManager = mock(classOf[ReplicaManager]) + val partitionRecordWriter = new CoordinatorPartitionWriter( + replicaManager + ) + + val callbackCapture: ArgumentCaptor[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit]) + + // Response contains error. 
+ when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map( + new TopicPartition("random-topic", 0) -> new DeleteRecordsPartitionResult() + .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code + ))) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception, exp) + } + + // Empty response + when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map[TopicPartition, DeleteRecordsPartitionResult]()) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertTrue(exp.isInstanceOf[IllegalStateException]) + } + } + + @Test + def testDeleteRecordsSuccess(): Unit = { + val replicaManager = mock(classOf[ReplicaManager]) + val partitionRecordWriter = new CoordinatorPartitionWriter( + replicaManager + ) + + val callbackCapture: ArgumentCaptor[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, DeleteRecordsPartitionResult] => Unit]) + + // Response contains no error. + when(replicaManager.deleteRecords( + ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), + callbackCapture.capture(), + ArgumentMatchers.eq(true) + )).thenAnswer { _ => + callbackCapture.getValue.apply(Map( + new TopicPartition("random-topic", 0) -> new DeleteRecordsPartitionResult() + .setErrorCode(Errors.NONE.code) + )) + } + + partitionRecordWriter.deleteRecords( + new TopicPartition("random-topic", 0), + 10L + ).whenComplete { (_, exp) => + assertNull(exp) + } + } } diff --git a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala index 8a8772ea08da2..162b14760fded 100755 --- a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala +++ b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala @@ -51,18 +51,14 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { private val _brokers = new mutable.ArrayBuffer[KafkaBroker] /** - * Get the list of brokers, which could be either BrokerServer objects or KafkaServer objects. + * Get the list of brokers. */ def brokers: mutable.Buffer[KafkaBroker] = _brokers /** - * Get the list of brokers, as instances of KafkaServer. - * This method should only be used when dealing with brokers that use ZooKeeper. + * Get the list of brokers. 
*/ - def servers: mutable.Buffer[KafkaServer] = { - checkIsZKTest() - _brokers.asInstanceOf[mutable.Buffer[KafkaServer]] - } + def servers: mutable.Buffer[KafkaBroker] = brokers def brokerServers: mutable.Buffer[BrokerServer] = { checkIsKRaftTest() @@ -102,9 +98,9 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { instanceConfigs } - def serverForId(id: Int): Option[KafkaServer] = servers.find(s => s.config.brokerId == id) + def serverForId(id: Int): Option[KafkaBroker] = brokers.find(s => s.config.brokerId == id) - def boundPort(server: KafkaServer): Int = server.boundPort(listenerName) + def boundPort(server: KafkaBroker): Int = server.boundPort(listenerName) def bootstrapServers(listenerName: ListenerName = listenerName): String = { TestUtils.bootstrapServers(_brokers, listenerName) @@ -345,47 +341,26 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { } } - def getController(): KafkaServer = { - checkIsZKTest() - val controllerId = TestUtils.waitUntilControllerElected(zkClient) - servers.filter(s => s.config.brokerId == controllerId).head - } - def getTopicIds(names: Seq[String]): Map[String, Uuid] = { val result = new util.HashMap[String, Uuid]() - if (isKRaftTest()) { - val topicIdsMap = controllerServer.controller.findTopicIds(ANONYMOUS_CONTEXT, names.asJava).get() - names.foreach { name => - val response = topicIdsMap.get(name) - result.put(name, response.result()) - } - } else { - val topicIdsMap = getController().kafkaController.controllerContext.topicIds.toMap - names.foreach { name => - if (topicIdsMap.contains(name)) result.put(name, topicIdsMap(name)) - } + val topicIdsMap = controllerServer.controller.findTopicIds(ANONYMOUS_CONTEXT, names.asJava).get() + names.foreach { name => + val response = topicIdsMap.get(name) + result.put(name, response.result()) } result.asScala.toMap } def getTopicIds(): Map[String, Uuid] = { - if (isKRaftTest()) { - controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap - } else { - getController().kafkaController.controllerContext.topicIds.toMap - } + controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().asScala.toMap } def getTopicNames(): Map[Uuid, String] = { - if (isKRaftTest()) { - val result = new util.HashMap[Uuid, String]() - controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().forEach { - (key, value) => result.put(value, key) - } - result.asScala.toMap - } else { - getController().kafkaController.controllerContext.topicNames.toMap + val result = new util.HashMap[Uuid, String]() + controllerServer.controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().forEach { + (key, value) => result.put(value, key) } + result.asScala.toMap } private def createBrokers(startup: Boolean): Unit = { @@ -408,13 +383,7 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { if (isKRaftTest()) { createBroker(config, brokerTime(config.brokerId), startup = false) } else { - TestUtils.createServer( - config, - time = brokerTime(config.brokerId), - threadNamePrefix = None, - startup = false, - enableZkApiForwarding = config.migrationEnabled && config.interBrokerProtocolVersion.isApiForwardingEnabled - ) + TestUtils.createServer(config, time = brokerTime(config.brokerId), threadNamePrefix = None, startup = false) } } diff --git a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala index 4bb0eccbb1812..1fa12ef990b32 100644 --- a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala +++ 
b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala @@ -20,7 +20,7 @@ package kafka.metrics import java.lang.management.ManagementFactory import java.util.Properties import javax.management.ObjectName -import com.yammer.metrics.core.{Gauge, MetricPredicate} +import com.yammer.metrics.core.MetricPredicate import org.junit.jupiter.api.Assertions._ import kafka.integration.KafkaServerTestHarness import kafka.server._ @@ -33,7 +33,6 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.metrics.JmxReporter import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics @@ -229,15 +228,10 @@ class MetricsTest extends KafkaServerTestHarness with Logging { "kafka.controller:type=KafkaController,name=MetadataErrorCount", "kafka.controller:type=KafkaController,name=OfflinePartitionsCount", "kafka.controller:type=KafkaController,name=PreferredReplicaImbalanceCount", - "kafka.controller:type=KafkaController,name=ZkMigrationState", ).foreach(expected => { assertEquals(1, metrics.keySet.asScala.count(_.getMBeanName.equals(expected)), s"Unable to find $expected") }) - - val zkStateMetricName = metrics.keySet.asScala.filter(_.getMBeanName == "kafka.controller:type=KafkaController,name=ZkMigrationState").head - val zkStateGauge = metrics.get(zkStateMetricName).asInstanceOf[Gauge[Int]] - assertEquals(ZkMigrationState.NONE.value().intValue(), zkStateGauge.value()) } /** diff --git a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala index 4c1494380ea60..95a9f92fe86ba 100644 --- a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala @@ -56,25 +56,9 @@ abstract class BaseRequestTest extends IntegrationTestHarness { }.map(_.socketServer).getOrElse(throw new IllegalStateException("No live broker is available")) } - def controllerSocketServer: SocketServer = { - if (isKRaftTest()) { - controllerServer.socketServer - } else { - servers.find { server => - server.kafkaController.isActive - }.map(_.socketServer).getOrElse(throw new IllegalStateException("No controller broker is available")) - } - } + def controllerSocketServer: SocketServer = controllerServer.socketServer - def notControllerSocketServer: SocketServer = { - if (isKRaftTest()) { - anySocketServer - } else { - servers.find { server => - !server.kafkaController.isActive - }.map(_.socketServer).getOrElse(throw new IllegalStateException("No non-controller broker is available")) - } - } + def notControllerSocketServer: SocketServer = anySocketServer def brokerSocketServer(brokerId: Int): SocketServer = { brokers.find { broker => diff --git a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala index 1a24eeb460a91..5456ab1f69dda 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala @@ -17,7 +17,7 @@ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterInstance, ClusterTest, ClusterTestExtensions, Type} +import 
org.apache.kafka.common.test.api.{ClusterInstance, ClusterTest, ClusterTestExtensions, Type} import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.{BrokerRegistrationRequestData, CreateTopicsRequestData} @@ -143,36 +143,7 @@ class BrokerRegistrationRequestTest { Errors.forCode(resp.topics().find(topicName).errorCode()) } - @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_4_IV0, - serverProperties = Array(new ClusterConfigProperty(key = "zookeeper.metadata.migration.enable", value = "false"))) - def testRegisterZkWithKRaftMigrationDisabled(clusterInstance: ClusterInstance): Unit = { - val clusterId = clusterInstance.clusterId() - val channelManager = brokerToControllerChannelManager(clusterInstance) - try { - channelManager.start() - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_3_IV0, MetadataVersion.IBP_3_3_IV0)))) - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), None)) - - assertEquals( - Errors.BROKER_ID_NOT_REGISTERED, - registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_4_IV0, MetadataVersion.IBP_3_4_IV0)))) - - assertEquals( - Errors.NONE, - registerBroker(channelManager, clusterId, 100, None, Some((MetadataVersion.IBP_3_4_IV0, MetadataVersion.IBP_3_4_IV0)))) - } finally { - channelManager.shutdown() - } - } - - @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3, - serverProperties = Array(new ClusterConfigProperty(key = "zookeeper.metadata.migration.enable", value = "false"))) + @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3) def testRegisterZkWith33Controller(clusterInstance: ClusterInstance): Unit = { // Verify that a controller running an old metadata.version cannot register a ZK broker val clusterId = clusterInstance.clusterId() diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index a36598a5eebca..27cd6644bd91f 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -77,7 +77,7 @@ import org.apache.kafka.common.utils.{ImplicitLinkedHashCollection, ProducerIdAn import org.apache.kafka.coordinator.group.GroupConfig.{CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, CONSUMER_SESSION_TIMEOUT_MS_CONFIG, SHARE_AUTO_OFFSET_RESET_CONFIG, SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, SHARE_RECORD_LOCK_DURATION_MS_CONFIG, SHARE_SESSION_TIMEOUT_MS_CONFIG} import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinator, GroupCoordinatorConfig} -import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorConfigTest} +import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorTestConfig} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.network.metrics.{RequestChannelMetrics, RequestMetrics} @@ -11702,7 +11702,7 @@ class KafkaApisTest extends Logging { val response = getReadShareGroupResponse( readRequestData, - config ++ 
ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, readStateResultData @@ -11757,7 +11757,7 @@ class KafkaApisTest extends Logging { val response = getReadShareGroupResponse( readRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, readStateResultData @@ -11812,7 +11812,7 @@ class KafkaApisTest extends Logging { val response = getWriteShareGroupResponse( writeRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, writeStateResultData @@ -11867,7 +11867,7 @@ class KafkaApisTest extends Logging { val response = getWriteShareGroupResponse( writeRequestData, - config ++ ShareCoordinatorConfigTest.testConfigMap().asScala, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, writeStateResultData diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 179a5db7ac755..a349564b95113 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -1829,45 +1829,6 @@ class KafkaConfigTest { ) } - @Test - def testMigrationCannotBeEnabledWithJBOD(): Unit = { - val props = TestUtils.createBrokerConfig(1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort, logDirCount = 2) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "3000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, MetadataVersion.IBP_3_7_IV1.version()) - - assertEquals( - "requirement failed: Cannot enable ZooKeeper migration with multiple log directories " + - "(aka JBOD) without setting 'inter.broker.protocol.version' to 3.7-IV2 or higher", - assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage) - } - - @Test - def testMigrationCannotBeEnabledWithBrokerIdGeneration(): Unit = { - val props = TestUtils.createBrokerConfig(-1, TestUtils.MockZkConnect, port = TestUtils.MockZkPort, logDirCount = 2) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "3000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - assertEquals( - "requirement failed: broker.id generation is incompatible with ZooKeeper migration. 
Please stop using it before enabling migration (set broker.id to a value greater or equal to 0).", - assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage) - } - - @Test - def testMigrationEnabledKRaftMode(): Unit = { - val props = new Properties() - props.putAll(kraftProps()) - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - - assertEquals( - "If using `zookeeper.metadata.migration.enable` in KRaft mode, `zookeeper.connect` must also be set.", - assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage) - - props.setProperty(ZkConfigs.ZK_CONNECT_CONFIG, "localhost:2181") - KafkaConfig.fromProps(props) - } - @Test def testConsumerGroupSessionTimeoutValidation(): Unit = { val props = new Properties() diff --git a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala index 6c03d5f467075..aa2e634e9bfaf 100644 --- a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala @@ -20,7 +20,6 @@ import java.io.File import java.util.Collections import java.util.concurrent.{ExecutionException, TimeUnit} import kafka.api.IntegrationTestHarness -import kafka.controller.{OfflineReplica, PartitionAndReplica} import kafka.utils.TestUtils.{Checkpoint, LogDirFailureType, Roll, waitUntilTrue} import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer.Consumer @@ -195,26 +194,17 @@ class LogDirFailureTest extends IntegrationTestHarness { // Consumer should receive some messages TestUtils.pollUntilAtLeastNumRecords(consumer, 1) - if (quorum == "kraft") { - waitUntilTrue(() => { - // get the broker with broker.nodeId == originalLeaderServerId - val brokerWithDirFail = brokers.find(_.config.nodeId == originalLeaderServerId).map(_.asInstanceOf[BrokerServer]) - // check if the broker has the offline log dir - val hasOfflineDir = brokerWithDirFail.exists(_.logDirFailureChannel.hasOfflineLogDir(failedLogDir.toPath.toString)) - // check if the broker has the offline replica - hasOfflineDir && brokerWithDirFail.exists(broker => - broker.replicaManager.metadataCache - .getClusterMetadata(broker.clusterId, broker.config.interBrokerListenerName) - .partition(new TopicPartition(topic, 0)).offlineReplicas().map(_.id()).contains(originalLeaderServerId)) - }, "Expected to find an offline log dir") - } else { - // There should be no remaining LogDirEventNotification znode - assertTrue(zkClient.getAllLogDirEventNotifications.isEmpty) - // The controller should have marked the replica on the original leader as offline - val controllerServer = servers.find(_.kafkaController.isActive).get - val offlineReplicas = controllerServer.kafkaController.controllerContext.replicasInState(topic, OfflineReplica) - assertTrue(offlineReplicas.contains(PartitionAndReplica(new TopicPartition(topic, 0), originalLeaderServerId))) - } + waitUntilTrue(() => { + // get the broker with broker.nodeId == originalLeaderServerId + val brokerWithDirFail = brokers.find(_.config.nodeId == originalLeaderServerId).map(_.asInstanceOf[BrokerServer]) + // check if the broker has the offline log dir + val hasOfflineDir = brokerWithDirFail.exists(_.logDirFailureChannel.hasOfflineLogDir(failedLogDir.toPath.toString)) + // check if the broker has the offline replica + hasOfflineDir && brokerWithDirFail.exists(broker => + broker.replicaManager.metadataCache + .getClusterMetadata(broker.clusterId, 
broker.config.interBrokerListenerName) + .partition(new TopicPartition(topic, 0)).offlineReplicas().map(_.id()).contains(originalLeaderServerId)) + }, "Expected to find an offline log dir") } diff --git a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala index 4e70652494c3b..8a1a04f6b9387 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala @@ -1017,75 +1017,6 @@ class MetadataCacheTest { (initialTopicIds, initialTopicStates, newTopicIds, newPartitionStates) } - /** - * Verify the behavior of ZkMetadataCache when handling "Full" UpdateMetadataRequest - */ - @Test - def testHandleFullUpdateMetadataRequestInZkMigration(): Unit = { - val (initialTopicIds, initialTopicStates, newTopicIds, newPartitionStates) = setupInitialAndFullMetadata() - - val updateMetadataRequestBuilder = () => new UpdateMetadataRequest.Builder(8, 1, 42, brokerEpoch, - newPartitionStates.asJava, Seq.empty.asJava, newTopicIds.asJava, true, AbstractControlRequest.Type.FULL).build() - - def verifyMetadataCache( - updateMetadataRequest: UpdateMetadataRequest, - zkMigrationEnabled: Boolean = true - )( - verifier: ZkMetadataCache => Unit - ): Unit = { - val cache = MetadataCache.zkMetadataCache(1, MetadataVersion.latestTesting(), zkMigrationEnabled = zkMigrationEnabled) - cache.updateMetadata(1, new UpdateMetadataRequest.Builder(8, 1, 42, brokerEpoch, - initialTopicStates.flatMap(_._2.values).toList.asJava, Seq.empty.asJava, initialTopicIds.asJava).build()) - cache.updateMetadata(1, updateMetadataRequest) - verifier.apply(cache) - } - - // KRaft=false Type=FULL, migration disabled - var updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setIsKRaftController(true) - updateMetadataRequest.data().setType(AbstractControlRequest.Type.FULL.toByte) - verifyMetadataCache(updateMetadataRequest, zkMigrationEnabled = false) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=FULL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(1, cache.getAllTopics().size) - assertFalse(cache.contains("test-topic-1")) - assertFalse(cache.contains("test-topic-1")) - } - - // KRaft=false Type=FULL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setIsKRaftController(false) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=INCREMENTAL - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setType(AbstractControlRequest.Type.INCREMENTAL.toByte) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - - // KRaft=true Type=UNKNOWN - updateMetadataRequest = updateMetadataRequestBuilder.apply() - updateMetadataRequest.data().setType(AbstractControlRequest.Type.UNKNOWN.toByte) - verifyMetadataCache(updateMetadataRequest) { cache => - assertEquals(3, cache.getAllTopics().size) - assertTrue(cache.contains("test-topic-1")) - assertTrue(cache.contains("test-topic-1")) - } - } 
- @Test def testGetOfflineReplicasConsidersDirAssignment(): Unit = { case class Broker(id: Int, dirs: util.List[Uuid]) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index ce45fcb6093b4..68bf1ec9922fb 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -34,7 +34,8 @@ import org.apache.kafka.common.{DirectoryId, IsolationLevel, Node, TopicIdPartit import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.{InvalidPidMappingException, KafkaStorageException} -import org.apache.kafka.common.message.LeaderAndIsrRequestData +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.{DeleteRecordsResponseData, LeaderAndIsrRequestData} import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState @@ -55,13 +56,10 @@ import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image._ import org.apache.kafka.metadata.LeaderConstants.NO_LEADER import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} -import org.apache.kafka.metadata.migration.ZkMigrationState import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} -import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, MetadataVersion, OffsetAndEpoch, RequestLocal, StopPartition} -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.log.remote.storage._ import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.network.BrokerEndPoint @@ -6244,8 +6242,7 @@ class ReplicaManagerTest { private def imageFromTopics(topicsImage: TopicsImage): MetadataImage = { val featuresImageLatest = new FeaturesImage( Collections.emptyMap(), - MetadataVersion.latestProduction(), - ZkMigrationState.NONE) + MetadataVersion.latestProduction()) new MetadataImage( new MetadataProvenance(100L, 10, 1000L, true), featuresImageLatest, @@ -6478,20 +6475,6 @@ class ReplicaManagerTest { val newFoo0 = new TopicIdPartition(Uuid.fromString("JRCmVxWxQamFs4S8NXYufg"), new TopicPartition("foo", 0)) val bar0 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 0)) - def setupReplicaManagerForKRaftMigrationTest(): ReplicaManager = { - setupReplicaManagerWithMockedPurgatories( - brokerId = 3, - timer = new MockTimer(time), - aliveBrokerIds = Seq(0, 1, 2), - propsModifier = props => { - props.setProperty(KRaftConfigs.MIGRATION_ENABLED_CONFIG, "true") - props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "1000@localhost:9093") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") - props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT") - }, - defaultTopicRemoteLogStorageEnable = false) 
- } - def verifyPartitionIsOnlineAndHasId( replicaManager: ReplicaManager, topicIdPartition: TopicIdPartition @@ -6516,59 +6499,6 @@ class ReplicaManagerTest { assertEquals(HostedPartition.None, partition, s"Expected ${topicIdPartition} to be offline, but it was: ${partition}") } - @Test - def testFullLairDuringKRaftMigration(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) - verifyPartitionIsOnlineAndHasId(replicaManager, foo0) - verifyPartitionIsOnlineAndHasId(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testFullLairDuringKRaftMigrationRemovesOld(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest1 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest1, (_, _) => ()) - val becomeLeaderRequest2 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(2, becomeLeaderRequest2, (_, _) => ()) - - verifyPartitionIsOffline(replicaManager, foo0) - verifyPartitionIsOffline(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testFullLairDuringKRaftMigrationWithTopicRecreations(): Unit = { - val replicaManager = setupReplicaManagerForKRaftMigrationTest() - try { - val becomeLeaderRequest1 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(foo0, foo1, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest1, (_, _) => ()) - val becomeLeaderRequest2 = LogManagerTest.createLeaderAndIsrRequestForStrayDetection( - Seq(newFoo0, bar0), Seq(3, 4, 3)) - replicaManager.becomeLeaderOrFollower(2, becomeLeaderRequest2, (_, _) => ()) - - verifyPartitionIsOnlineAndHasId(replicaManager, newFoo0) - verifyPartitionIsOffline(replicaManager, foo1) - verifyPartitionIsOnlineAndHasId(replicaManager, bar0) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - @Test def testRemoteReadQuotaExceeded(): Unit = { when(mockRemoteLogManager.getFetchThrottleTimeMs).thenReturn(quotaExceededThrottleTime) @@ -6660,6 +6590,61 @@ class ReplicaManagerTest { } } + @Test + def testDeleteRecordsInternalTopicDeleteDisallowed(): Unit = { + val localId = 1 + val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) + val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + + val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) + val directoryIds = rm.logManager.directoryIdsSet.toList + assertEquals(directoryIds.size, 2) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) + val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) 
+ + def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { + assert(responseStatus.values.head.errorCode == Errors.INVALID_TOPIC_EXCEPTION.code) + } + + // default internal topics delete disabled + rm.deleteRecords( + timeout = 0L, + Map[TopicPartition, Long](topicPartition0.topicPartition() -> 10L), + responseCallback = callback + ) + } + + @Test + def testDeleteRecordsInternalTopicDeleteAllowed(): Unit = { + val localId = 1 + val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) + val directoryEventHandler = mock(classOf[DirectoryEventHandler]) + + val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) + val directoryIds = rm.logManager.directoryIdsSet.toList + assertEquals(directoryIds.size, 2) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) + val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) + + def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { + assert(responseStatus.values.head.errorCode == Errors.NONE.code) + } + + // internal topics delete allowed + rm.deleteRecords( + timeout = 0L, + Map[TopicPartition, Long](topicPartition0.topicPartition() -> 0L), + responseCallback = callback, + allowInternalTopicDeletion = true + ) + } + private def readFromLogWithOffsetOutOfRange(tp: TopicPartition): Seq[(TopicIdPartition, LogReadResult)] = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true) try { diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index c81fa2b189d8b..b2447e7f7c59a 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -159,12 +159,11 @@ object TestUtils extends Logging { * @param config The configuration of the server */ def createServer(config: KafkaConfig, time: Time = Time.SYSTEM): KafkaServer = { - createServer(config, time, None, startup = true, enableZkApiForwarding = false) + createServer(config, time, None, startup = true) } - def createServer(config: KafkaConfig, time: Time, threadNamePrefix: Option[String], - startup: Boolean, enableZkApiForwarding: Boolean) = { - val server = new KafkaServer(config, time, threadNamePrefix, enableForwarding = enableZkApiForwarding) + def createServer(config: KafkaConfig, time: Time, threadNamePrefix: Option[String], startup: Boolean): KafkaServer = { + val server = new KafkaServer(config, time, threadNamePrefix) if (startup) server.startup() server } diff --git a/docs/ops.html b/docs/ops.html index 47b7498d25c94..c299d7c8dbd65 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -2753,7 +2753,7 @@

metrics.recording.level="info"
Client Metrics
-All of the following metrics have a recording level of info: +All the following metrics have a recording level of info: @@ -2800,7 +2800,7 @@
Thread Metrics
-All of the following metrics have a recording level of info: +All the following metrics have a recording level of info:
@@ -2850,7 +2850,7 @@
Task Metrics
-All of the following metrics have a recording level of debug, except for the dropped-records-* and +All the following metrics have a recording level of debug, except for the dropped-records-* and active-process-ratio metrics which have a recording level of info:
@@ -2943,7 +2943,7 @@
@@ -3026,17 +3026,17 @@
RocksDB may have an impact on performance. Statistics-based metrics are collected every minute from the RocksDB state stores. @@ -3355,7 +3355,7 @@
Record Cache Metrics
- All of the following metrics have a recording level of debug: + All the following metrics have a recording level of debug:
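The recording levels mentioned throughout these metric sections are selected via the metrics.recording.level client property shown earlier. As a reminder, here is a minimal, hypothetical Java sketch (the application id and bootstrap address are placeholders, not taken from this patch) of how a Kafka Streams application would raise the level to debug so the debug-level task and record-cache metrics above are actually recorded:

    import java.util.Properties;
    import org.apache.kafka.streams.StreamsConfig;

    public class MetricsRecordingLevelSketch {
        public static void main(final String[] args) {
            final Properties props = new Properties();
            // Placeholder identifiers for the sketch; every Streams app needs these two settings.
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "metrics-recording-demo");
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            // The default level is "info"; raising it to "debug" enables the debug-level
            // metrics (e.g. most task metrics and the record cache metrics) described above.
            props.put(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "debug");
        }
    }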
diff --git a/docs/upgrade.html b/docs/upgrade.html index dcd9c4334d75a..70b8000b9505d 100644 --- a/docs/upgrade.html +++ b/docs/upgrade.html @@ -59,6 +59,20 @@
Notable changes in 4 Please use log.message.timestamp.before.max.ms and log.message.timestamp.after.max.ms instead. See KIP-937 for details. +
  • + The default value of the remote.log.manager.copier.thread.pool.size configuration was changed from -1 to 10. + A value of -1 is no longer valid; the minimum valid value is 1. + See KIP-1030 +
  • + The default value of the remote.log.manager.expiration.thread.pool.size configuration was changed from -1 to 10. + A value of -1 is no longer valid; the minimum valid value is 1. + See KIP-1030 +
  • + The default value of the remote.log.manager.thread.pool.size configuration was changed from 10 to 2. + See KIP-1030; a configuration sketch follows below. +
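To make the three notes above concrete, here is a small, hypothetical Java sketch of a broker configuration fragment. The keys are the ones quoted in the notes, the values simply mirror the new defaults, and the fragment is illustrative rather than a recommended setting:

    import java.util.Properties;

    public class RemoteLogManagerThreadPoolDefaults {
        public static void main(final String[] args) {
            final Properties brokerProps = new Properties();
            // New default is 10; -1 is no longer accepted and the minimum valid value is 1 (KIP-1030).
            brokerProps.put("remote.log.manager.copier.thread.pool.size", "10");
            // New default is 10; -1 is no longer accepted and the minimum valid value is 1 (KIP-1030).
            brokerProps.put("remote.log.manager.expiration.thread.pool.size", "10");
            // Default lowered from 10 to 2 (KIP-1030).
            brokerProps.put("remote.log.manager.thread.pool.size", "2");
            // Print the fragment so the sketch is runnable on its own.
            brokerProps.forEach((key, value) -> System.out.println(key + "=" + value));
        }
    }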
  • MirrorMaker diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle index 6f65f6e874fb8..37677869afbca 100644 --- a/gradle/dependencies.gradle +++ b/gradle/dependencies.gradle @@ -71,15 +71,15 @@ versions += [ jackson: "2.16.2", jacoco: "0.8.10", javassist: "3.29.2-GA", - jetty: "9.4.56.v20240826", - jersey: "2.39.1", + jetty: "12.0.15", + jersey: "3.1.9", jline: "3.25.1", jmh: "1.37", hamcrest: "2.2", scalaLogging: "3.9.5", jaxAnnotation: "1.3.2", jaxb: "2.3.1", - jaxrs: "2.1.1", + jakartaRs: "3.1.0", jfreechart: "1.0.0", jopt: "5.0.4", jose4j: "0.9.4", @@ -162,15 +162,15 @@ libs += [ jacksonModuleScala: "com.fasterxml.jackson.module:jackson-module-scala_$versions.baseScala:$versions.jackson", jacksonJDK8Datatypes: "com.fasterxml.jackson.datatype:jackson-datatype-jdk8:$versions.jackson", jacksonBlackbird: "com.fasterxml.jackson.module:jackson-module-blackbird:$versions.jackson", - jacksonJaxrsJsonProvider: "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:$versions.jackson", + jacksonJakartarsJsonProvider: "com.fasterxml.jackson.jakarta.rs:jackson-jakarta-rs-json-provider:$versions.jackson", jaxAnnotationApi: "javax.annotation:javax.annotation-api:$versions.jaxAnnotation", jaxbApi: "javax.xml.bind:jaxb-api:$versions.jaxb", - jaxrsApi: "javax.ws.rs:javax.ws.rs-api:$versions.jaxrs", + jakartaRsApi: "jakarta.ws.rs:jakarta.ws.rs-api:$versions.jakartaRs", javassist: "org.javassist:javassist:$versions.javassist", jettyServer: "org.eclipse.jetty:jetty-server:$versions.jetty", jettyClient: "org.eclipse.jetty:jetty-client:$versions.jetty", - jettyServlet: "org.eclipse.jetty:jetty-servlet:$versions.jetty", - jettyServlets: "org.eclipse.jetty:jetty-servlets:$versions.jetty", + jettyServlet: "org.eclipse.jetty.ee10:jetty-ee10-servlet:$versions.jetty", + jettyServlets: "org.eclipse.jetty.ee10:jetty-ee10-servlets:$versions.jetty", jerseyContainerServlet: "org.glassfish.jersey.containers:jersey-container-servlet:$versions.jersey", jerseyHk2: "org.glassfish.jersey.inject:jersey-hk2:$versions.jersey", jline: "org.jline:jline:$versions.jline", diff --git a/gradle/spotbugs-exclude.xml b/gradle/spotbugs-exclude.xml index b5a3c9bd96e9e..3f0f9efd16565 100644 --- a/gradle/spotbugs-exclude.xml +++ b/gradle/spotbugs-exclude.xml @@ -40,6 +40,13 @@ For a detailed description of spotbugs bug categories, see https://spotbugs.read + + + + + @@ -264,6 +271,12 @@ For a detailed description of spotbugs bug categories, see https://spotbugs.read + + + + + + p\n" + - " Processor: p (stores: [])\n" + - " --> KSTREAM-SINK-0000000001\n" + - " <-- KSTREAM-SOURCE-0000000000\n" + - " Sink: KSTREAM-SINK-0000000001 (topic: output)\n" + - " <-- p\n\n") - ); - - try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { - final TestInputTopic inputTopic = - driver.createInputTopic( - input, - new StringSerializer(), - new StringSerializer() - ); - final TestOutputTopic outputTopic = - driver.createOutputTopic( - output, - new StringDeserializer(), - new IntegerDeserializer() - ); - - inputTopic.pipeInput("A", "0", 5L); - inputTopic.pipeInput("B", "00", 100L); - inputTopic.pipeInput("C", "000", 0L); - inputTopic.pipeInput("D", "0000", 0L); - inputTopic.pipeInput("A", "00000", 10L); - inputTopic.pipeInput("A", "000000", 8L); - - final List> outputExpectRecords = new ArrayList<>(); - outputExpectRecords.add(new TestRecord<>("A", 1, Instant.ofEpochMilli(5L))); - outputExpectRecords.add(new TestRecord<>("B", 2, Instant.ofEpochMilli(100L))); - outputExpectRecords.add(new 
TestRecord<>("C", 3, Instant.ofEpochMilli(0L))); - outputExpectRecords.add(new TestRecord<>("D", 4, Instant.ofEpochMilli(0L))); - outputExpectRecords.add(new TestRecord<>("A", 5, Instant.ofEpochMilli(10L))); - outputExpectRecords.add(new TestRecord<>("A", 6, Instant.ofEpochMilli(8L))); - - assertEquals(outputTopic.readRecordsToList(), outputExpectRecords); - } - } - @Test public void shouldProcessValues() { final Consumed consumed = Consumed.with(Serdes.String(), Serdes.String()); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index f388441f3561d..d1068444cdb97 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -34,11 +34,11 @@ import org.apache.kafka.streams.kstream.JoinWindows; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.StreamJoined; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.InternalTopicConfig; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; +import org.apache.kafka.streams.processor.internals.StoreBuilderWrapper; import org.apache.kafka.streams.state.BuiltInDslStoreSuppliers; import org.apache.kafka.streams.state.DslWindowParams; import org.apache.kafka.streams.state.KeyValueStore; @@ -456,39 +456,42 @@ public void shouldThrottleEmitNonJoinedOuterRecordsEvenWhenClockDrift() { * This test is testing something internal to [[KStreamKStreamJoin]], so we had to setup low-level api manually. 
*/ final KStreamImplJoin.TimeTrackerSupplier tracker = new KStreamImplJoin.TimeTrackerSupplier(); - final KStreamKStreamJoinRightSide join = new KStreamKStreamJoinRightSide<>( + final WindowStoreBuilder otherStoreBuilder = new WindowStoreBuilder<>( + new InMemoryWindowBytesStoreSupplier( "other", - new JoinWindowsInternal(JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(1000))), - (key, v1, v2) -> v1 + v2, - true, - Optional.of("outer"), - tracker); + 1000L, + 100, + false), + Serdes.String(), + Serdes.String(), + new MockTime()); + final KeyValueStoreBuilder, LeftOrRightValue> outerStoreBuilder = new KeyValueStoreBuilder<>( + new InMemoryKeyValueBytesStoreSupplier("outer"), + new TimestampedKeyAndJoinSideSerde<>(Serdes.String()), + new LeftOrRightValueSerde<>(Serdes.String(), Serdes.String()), + new MockTime() + ); + final KStreamKStreamJoinRightSide join = new KStreamKStreamJoinRightSide<>( + new JoinWindowsInternal(JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(1000))), + (key, v1, v2) -> v1 + v2, + true, + tracker, + StoreBuilderWrapper.wrapStoreBuilder(otherStoreBuilder), + Optional.of(StoreBuilderWrapper.wrapStoreBuilder(outerStoreBuilder))); + final Processor joinProcessor = join.get(); final MockInternalNewProcessorContext procCtx = new MockInternalNewProcessorContext<>(); - final WindowStore otherStore = new WindowStoreBuilder<>( - new InMemoryWindowBytesStoreSupplier( - "other", - 1000L, - 100, - false), - Serdes.String(), - Serdes.String(), - new MockTime()).build(); + final WindowStore otherStore = otherStoreBuilder.build(); - final KeyValueStore, LeftOrRightValue> outerStore = Mockito.spy( - new KeyValueStoreBuilder<>( - new InMemoryKeyValueBytesStoreSupplier("outer"), - new TimestampedKeyAndJoinSideSerde<>(Serdes.String()), - new LeftOrRightValueSerde<>(Serdes.String(), Serdes.String()), - new MockTime() - ).build()); + final KeyValueStore, LeftOrRightValue> outerStore = + Mockito.spy(outerStoreBuilder.build()); final GenericInMemoryKeyValueStore rootStore = new GenericInMemoryKeyValueStore<>("root"); - otherStore.init((StateStoreContext) procCtx, rootStore); + otherStore.init(procCtx, rootStore); procCtx.addStateStore(otherStore); - outerStore.init((StateStoreContext) procCtx, rootStore); + outerStore.init(procCtx, rootStore); procCtx.addStateStore(outerStore); joinProcessor.init(procCtx); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java index b0afe364985b0..5802518cd26a2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java @@ -21,13 +21,14 @@ import org.apache.kafka.common.config.TopicConfig; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.AutoOffsetReset; import org.apache.kafka.streams.StreamsConfig; -import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.TopologyConfig; import org.apache.kafka.streams.TopologyDescription; import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler; import org.apache.kafka.streams.errors.LogAndFailExceptionHandler; import org.apache.kafka.streams.errors.TopologyException; +import org.apache.kafka.streams.internals.AutoOffsetResetInternal; import org.apache.kafka.streams.kstream.Materialized; 
import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.TopicNameExtractor; @@ -100,28 +101,43 @@ public class InternalTopologyBuilderTest { @Test public void shouldAddSourceWithOffsetReset() { + final String noneTopic = "noneTopic"; final String earliestTopic = "earliestTopic"; final String latestTopic = "latestTopic"; + final String durationTopic = "durationTopic"; - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, null, null, earliestTopic); - builder.addSource(Topology.AutoOffsetReset.LATEST, "source2", null, null, null, latestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.none()), "source0", null, null, null, noneTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source1", null, null, null, earliestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source2", null, null, null, latestTopic); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.byDuration(Duration.ofSeconds(42))), "source3", null, null, null, durationTopic); builder.initializeSubscription(); + assertThat(builder.offsetResetStrategy(noneTopic), equalTo(AutoOffsetResetStrategy.NONE)); assertThat(builder.offsetResetStrategy(earliestTopic), equalTo(AutoOffsetResetStrategy.EARLIEST)); assertThat(builder.offsetResetStrategy(latestTopic), equalTo(AutoOffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy(durationTopic).type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.offsetResetStrategy(durationTopic).duration().get().toSeconds(), equalTo(42L)); } @Test public void shouldAddSourcePatternWithOffsetReset() { + final String noneTopicPattern = "none.*Topic"; final String earliestTopicPattern = "earliest.*Topic"; final String latestTopicPattern = "latest.*Topic"; + final String durationTopicPattern = "duration.*Topic"; + + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.none()), "source0", null, null, null, Pattern.compile(noneTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "sourc1", null, null, null, Pattern.compile(earliestTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source2", null, null, null, Pattern.compile(latestTopicPattern)); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.byDuration(Duration.ofSeconds(42))), "source3", null, null, null, Pattern.compile(durationTopicPattern)); - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, null, null, Pattern.compile(earliestTopicPattern)); - builder.addSource(Topology.AutoOffsetReset.LATEST, "source2", null, null, null, Pattern.compile(latestTopicPattern)); builder.initializeSubscription(); + assertThat(builder.offsetResetStrategy("noneTestTopic"), equalTo(AutoOffsetResetStrategy.NONE)); assertThat(builder.offsetResetStrategy("earliestTestTopic"), equalTo(AutoOffsetResetStrategy.EARLIEST)); assertThat(builder.offsetResetStrategy("latestTestTopic"), equalTo(AutoOffsetResetStrategy.LATEST)); + assertThat(builder.offsetResetStrategy("durationTestTopic").type(), equalTo(AutoOffsetResetStrategy.StrategyType.BY_DURATION)); + assertThat(builder.offsetResetStrategy("durationTestTopic").duration().get().toSeconds(), equalTo(42L)); } @Test @@ -131,7 +147,7 @@ public void shouldAddSourceWithoutOffsetReset() { assertEquals(Collections.singletonList("test-topic"), builder.fullSourceTopicNames()); - 
assertThat(builder.offsetResetStrategy("test-topic"), equalTo(AutoOffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy("test-topic"), equalTo(null)); } @Test @@ -143,20 +159,20 @@ public void shouldAddPatternSourceWithoutOffsetReset() { assertThat(expectedPattern.pattern(), builder.sourceTopicPatternString(), equalTo("test-.*")); - assertThat(builder.offsetResetStrategy("test-topic"), equalTo(AutoOffsetResetStrategy.NONE)); + assertThat(builder.offsetResetStrategy("test-topic"), equalTo(null)); } @Test public void shouldNotAllowOffsetResetSourceWithoutTopics() { - assertThrows(TopologyException.class, () -> builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", + assertThrows(TopologyException.class, () -> builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer())); } @Test public void shouldNotAllowOffsetResetSourceWithDuplicateSourceName() { - builder.addSource(Topology.AutoOffsetReset.EARLIEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-1"); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.earliest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-1"); try { - builder.addSource(Topology.AutoOffsetReset.LATEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-2"); + builder.addSource(new AutoOffsetResetInternal(AutoOffsetReset.latest()), "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-2"); fail("Should throw TopologyException for duplicate source name"); } catch (final TopologyException expected) { /* ok */ } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java index 8a3e6c17d8a19..e48d9275b3ab2 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java @@ -62,6 +62,7 @@ import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.errors.TaskCorruptedException; import org.apache.kafka.streams.errors.TaskMigratedException; +import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.internals.ConsumedInternal; import org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder; @@ -187,7 +188,7 @@ public class StreamThreadTest { private final MockTime mockTime = new MockTime(); private final String stateDir = TestUtils.tempDirectory().getPath(); private final MockClientSupplier clientSupplier = new MockClientSupplier(); - private final ConsumedInternal consumed = new ConsumedInternal<>(); + private final ConsumedInternal consumed = new ConsumedInternal<>(Consumed.with(null, null)); private final ChangelogReader changelogReader = new MockChangelogReader(); private StateDirectory stateDirectory = null; private final InternalTopologyBuilder internalTopologyBuilder = new InternalTopologyBuilder(); @@ -2087,7 +2088,7 @@ private void setupThread(final String storeName1, .count(Materialized.as(storeName1)); final MaterializedInternal> materialized = new MaterializedInternal<>(Materialized.as(storeName2), internalStreamsBuilder, ""); - internalStreamsBuilder.table(topic2, new ConsumedInternal<>(), materialized); + internalStreamsBuilder.table(topic2, new 
ConsumedInternal<>(Consumed.with(null, null)), materialized); internalStreamsBuilder.buildAndOptimizeTopology(); restoreConsumer.updatePartitions(changelogName1, diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java index 6cf72e91db3e7..592326fae87e8 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java @@ -44,6 +44,7 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.TopologyWrapper; +import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Grouped; import org.apache.kafka.streams.kstream.JoinWindows; import org.apache.kafka.streams.kstream.KStream; @@ -2702,8 +2703,8 @@ public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount( final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder); - final KStream inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>()); - final KTable inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store"))); + final KStream inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>(Consumed.with(null, null))); + final KTable inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(Consumed.with(null, null)), new MaterializedInternal<>(Materialized.as("store"))); inputTopic .groupBy( (k, v) -> k, diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java index 1d81dffdd7813..c8b9aaedf5018 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedKeyValueBufferTest.java @@ -29,14 +29,13 @@ import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.internals.Change; -import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction; -import org.apache.kafka.test.MockInternalProcessorContext; +import org.apache.kafka.test.MockInternalNewProcessorContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.TestUtils; @@ -96,21 +95,21 @@ private void setup(final String testName, final Function bufferSuppli this.bufferSupplier = bufferSupplier; } - private static MockInternalProcessorContext makeContext() { + private static MockInternalNewProcessorContext makeContext() { final Properties properties = new Properties(); properties.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID); properties.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ""); final TaskId taskId = new TaskId(0, 0); 
- final MockInternalProcessorContext context = new MockInternalProcessorContext(properties, taskId, TestUtils.tempDirectory()); + final MockInternalNewProcessorContext context = new MockInternalNewProcessorContext<>(properties, taskId, TestUtils.tempDirectory()); context.setRecordCollector(new MockRecordCollector()); return context; } - private static void cleanup(final MockInternalProcessorContext context, final TimeOrderedKeyValueBuffer> buffer) { + private static void cleanup(final MockInternalNewProcessorContext context, final TimeOrderedKeyValueBuffer> buffer) { try { buffer.close(); Utils.delete(context.stateDir()); @@ -124,8 +123,8 @@ private static void cleanup(final MockInternalProcessorContext context, final Ti public void shouldInit(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); cleanup(context, buffer); } @@ -134,8 +133,8 @@ public void shouldInit(final String testName, final Function bufferSu public void shouldAcceptData(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "2p93nf"); cleanup(context, buffer); } @@ -145,8 +144,8 @@ public void shouldAcceptData(final String testName, final Function bu public void shouldRejectNullValues(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); try { buffer.put(0, new Record<>("asdf", null, 0L), getContext(0)); fail("expected an exception"); @@ -161,8 +160,8 @@ public void shouldRejectNullValues(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "qwer"); assertThat(buffer.numRecords(), is(1)); buffer.evictWhile(() -> true, kv -> { }); @@ -175,8 +174,8 @@ public void shouldRemoveData(final String testName, final Function bu public void shouldRespectEvictionPredicate(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "eyt"); putRecord(buffer, context, 1L, 0L, "zxcv", "rtg"); assertThat(buffer.numRecords(), is(2)); @@ -194,8 
+193,8 @@ public void shouldRespectEvictionPredicate(final String testName, final Function public void shouldTrackCount(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "oin"); assertThat(buffer.numRecords(), is(1)); putRecord(buffer, context, 1L, 0L, "asdf", "wekjn"); @@ -210,8 +209,8 @@ public void shouldTrackCount(final String testName, final Function bu public void shouldTrackSize(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 0L, 0L, "asdf", "23roni"); assertThat(buffer.bufferSize(), is(43L)); putRecord(buffer, context, 1L, 0L, "asdf", "3l"); @@ -226,8 +225,8 @@ public void shouldTrackSize(final String testName, final Function buf public void shouldTrackMinTimestamp(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 1L, 0L, "asdf", "2093j"); assertThat(buffer.minTimestamp(), is(1L)); putRecord(buffer, context, 0L, 0L, "zxcv", "3gon4i"); @@ -240,8 +239,8 @@ public void shouldTrackMinTimestamp(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 1L, 0L, "zxcv", "o23i4"); assertThat(buffer.numRecords(), is(1)); @@ -288,8 +287,8 @@ public void shouldEvictOldestAndUpdateSizeAndCountAndMinTimestamp(final String t public void shouldReturnUndefinedOnPriorValueForNotBufferedKey(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); assertThat(buffer.priorValueForBuffered("ASDF"), is(Maybe.undefined())); } @@ -299,8 +298,8 @@ public void shouldReturnUndefinedOnPriorValueForNotBufferedKey(final String test public void shouldReturnPriorValueForBufferedKey(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); 
+ buffer.init(context, buffer); final ProcessorRecordContext recordContext = getContext(0L); context.setRecordContext(recordContext); @@ -315,8 +314,8 @@ public void shouldReturnPriorValueForBufferedKey(final String testName, final Fu public void shouldFlush(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); putRecord(buffer, context, 2L, 0L, "asdf", "2093j"); putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i"); putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef"); @@ -388,8 +387,8 @@ public void shouldFlush(final String testName, final Function bufferS public void shouldRestoreOldUnversionedFormat(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -509,8 +508,8 @@ public void shouldRestoreOldUnversionedFormat(final String testName, final Funct public void shouldRestoreV1Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -633,8 +632,8 @@ public void shouldRestoreV1Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -759,8 +758,8 @@ public void shouldRestoreV3FormatWithV2Header(final String testName, final Funct // V2 header, so we need to be sure to handle this case as well. // Note the data is the same as the V3 test. 
final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -882,8 +881,8 @@ public void shouldRestoreV3FormatWithV2Header(final String testName, final Funct public void shouldRestoreV3Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -1005,8 +1004,8 @@ public void shouldRestoreV3Format(final String testName, final Function bufferSupplier) { setup(testName, bufferSupplier); final TimeOrderedKeyValueBuffer> buffer = bufferSupplier.apply(testName); - final MockInternalProcessorContext context = makeContext(); - buffer.init((StateStoreContext) context, buffer); + final MockInternalNewProcessorContext context = makeContext(); + buffer.init(context, buffer); final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName); @@ -1039,7 +1038,7 @@ public void shouldNotRestoreUnrecognizedVersionRecord(final String testName, fin } private static void putRecord(final TimeOrderedKeyValueBuffer> buffer, - final MockInternalProcessorContext context, + final MockInternalNewProcessorContext context, final long streamTime, final long recordTimestamp, final String key, @@ -1049,11 +1048,12 @@ private static void putRecord(final TimeOrderedKeyValueBuffer(key, new Change<>(value, null), 0L), recordContext); } + @SuppressWarnings("resource") private static BufferValue getBufferValue(final String value, final long timestamp) { return new BufferValue( null, null, - Serdes.String().serializer().serialize(null, value), + new StringSerializer().serialize(null, value), getContext(timestamp) ); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java index 8d189144504d9..57259cb31c4a1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/metrics/RocksDBBlockCacheMetricsTest.java @@ -26,12 +26,13 @@ import org.apache.kafka.streams.state.internals.BlockBasedTableConfigWithAccessibleCache; import org.apache.kafka.streams.state.internals.RocksDBStore; import org.apache.kafka.streams.state.internals.RocksDBTimestampedStore; -import org.apache.kafka.test.MockInternalProcessorContext; +import org.apache.kafka.test.MockInternalNewProcessorContext; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.Cache; import java.io.File; import java.io.IOException; @@ -57,8 
+58,8 @@ public class RocksDBBlockCacheMetricsTest { public static Stream stores() { final File stateDir = TestUtils.tempDirectory("state"); return Stream.of( - Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)), - Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalProcessorContext(new Properties(), TASK_ID, stateDir)) + Arguments.of(new RocksDBStore(STORE_NAME, METRICS_SCOPE), new MockInternalNewProcessorContext<>(new Properties(), TASK_ID, stateDir)), + Arguments.of(new RocksDBTimestampedStore(STORE_NAME, METRICS_SCOPE), new MockInternalNewProcessorContext<>(new Properties(), TASK_ID, stateDir)) ); } @@ -79,8 +80,11 @@ static void withStore(final RocksDBStore store, final StateStoreContext context, @ParameterizedTest @MethodSource("stores") public void shouldRecordCorrectBlockCacheCapacity(final RocksDBStore store, final StateStoreContext ctx) { - withStore(store, ctx, () -> - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.CAPACITY_OF_BLOCK_CACHE, BigInteger.valueOf(50 * 1024 * 1024L))); + withStore( + store, + ctx, + () -> assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.CAPACITY_OF_BLOCK_CACHE, BigInteger.valueOf(50 * 1024 * 1024L)) + ); } @ParameterizedTest @@ -88,8 +92,10 @@ public void shouldRecordCorrectBlockCacheCapacity(final RocksDBStore store, fina public void shouldRecordCorrectBlockCacheUsage(final RocksDBStore store, final StateStoreContext ctx) { withStore(store, ctx, () -> { final BlockBasedTableConfigWithAccessibleCache tableFormatConfig = (BlockBasedTableConfigWithAccessibleCache) store.getOptions().tableFormatConfig(); - final long usage = tableFormatConfig.blockCache().getUsage(); - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + try (final Cache blockCache = tableFormatConfig.blockCache()) { + final long usage = blockCache.getUsage(); + assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + } }); } @@ -98,11 +104,14 @@ public void shouldRecordCorrectBlockCacheUsage(final RocksDBStore store, final S public void shouldRecordCorrectBlockCachePinnedUsage(final RocksDBStore store, final StateStoreContext ctx) { withStore(store, ctx, () -> { final BlockBasedTableConfigWithAccessibleCache tableFormatConfig = (BlockBasedTableConfigWithAccessibleCache) store.getOptions().tableFormatConfig(); - final long usage = tableFormatConfig.blockCache().getPinnedUsage(); - assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.PINNED_USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + try (final Cache blockCache = tableFormatConfig.blockCache()) { + final long usage = blockCache.getPinnedUsage(); + assertMetric(ctx, STATE_STORE_LEVEL_GROUP, RocksDBMetrics.PINNED_USAGE_OF_BLOCK_CACHE, BigInteger.valueOf(usage)); + } }); } + @SuppressWarnings("resource") public void assertMetric(final StateStoreContext context, final String group, final String metricName, final T expected) { final StreamsMetricsImpl metrics = ProcessorContextUtils.metricsImpl(context); final MetricName name = metrics.metricsRegistry().metricName( diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java b/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java index a3c7194680d79..507ac38906196 100644 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java +++ 
b/streams/src/test/java/org/apache/kafka/test/MockInternalNewProcessorContext.java @@ -41,12 +41,16 @@ import org.apache.kafka.streams.state.internals.ThreadCache.DirtyEntryFlushListener; import java.io.File; +import java.util.LinkedHashMap; +import java.util.Map; import java.util.Objects; import java.util.Properties; public class MockInternalNewProcessorContext extends MockProcessorContext implements InternalProcessorContext { - private ProcessorNode currentNode; + private ProcessorNode currentNode; + private RecordCollector recordCollector; + private final Map restoreCallbacks = new LinkedHashMap<>(); private long currentSystemTimeMs; private final TaskType taskType = TaskType.ACTIVE; @@ -108,12 +112,12 @@ public void setHeaders(final Headers headers) { } @Override - public void setCurrentNode(final ProcessorNode currentNode) { + public void setCurrentNode(final ProcessorNode currentNode) { this.currentNode = currentNode; } @Override - public ProcessorNode currentNode() { + public ProcessorNode currentNode() { return currentNode; } @@ -128,9 +132,19 @@ public void initialize() {} @Override public void uninitialize() {} + @Override + public RecordCollector recordCollector() { + return recordCollector; + } + + public void setRecordCollector(final RecordCollector recordCollector) { + this.recordCollector = recordCollector; + } + @Override public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback) { + restoreCallbacks.put(store.name(), stateRestoreCallback); addStateStore(store); } @@ -138,9 +152,14 @@ public void register(final StateStore store, public void register(final StateStore store, final StateRestoreCallback stateRestoreCallback, final CommitCallback checkpoint) { + restoreCallbacks.put(store.name(), stateRestoreCallback); addStateStore(store); } + public StateRestoreCallback stateRestoreCallback(final String storeName) { + return restoreCallbacks.get(storeName); + } + @Override public void forward(K key, V value) { throw new UnsupportedOperationException("Migrate to new implementation"); diff --git a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java b/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java deleted file mode 100644 index 4d4ad0e4dc088..0000000000000 --- a/streams/src/test/java/org/apache/kafka/test/MockInternalProcessorContext.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.test; - -import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.processor.CommitCallback; -import org.apache.kafka.streams.processor.StateRestoreCallback; -import org.apache.kafka.streams.processor.StateStore; -import org.apache.kafka.streams.processor.TaskId; -import org.apache.kafka.streams.processor.To; -import org.apache.kafka.streams.processor.api.FixedKeyRecord; -import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.api.RecordMetadata; -import org.apache.kafka.streams.processor.internals.InternalProcessorContext; -import org.apache.kafka.streams.processor.internals.ProcessorMetadata; -import org.apache.kafka.streams.processor.internals.ProcessorNode; -import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.RecordCollector; -import org.apache.kafka.streams.processor.internals.StreamTask; -import org.apache.kafka.streams.processor.internals.Task.TaskType; -import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.query.Position; -import org.apache.kafka.streams.state.internals.ThreadCache; -import org.apache.kafka.streams.state.internals.ThreadCache.DirtyEntryFlushListener; - -import java.io.File; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Properties; - -@SuppressWarnings("deprecation") -public class MockInternalProcessorContext extends org.apache.kafka.streams.processor.MockProcessorContext implements InternalProcessorContext { - - private final Map restoreCallbacks = new LinkedHashMap<>(); - private ProcessorNode currentNode; - private RecordCollector recordCollector; - private long currentSystemTimeMs; - private final TaskType taskType = TaskType.ACTIVE; - private ProcessorMetadata processorMetadata; - - public MockInternalProcessorContext() { - processorMetadata = new ProcessorMetadata(); - } - - public MockInternalProcessorContext(final Properties config, final TaskId taskId, final File stateDir) { - super(config, taskId, stateDir); - processorMetadata = new ProcessorMetadata(); - } - - @Override - public void setSystemTimeMs(long timeMs) { - currentSystemTimeMs = timeMs; - } - - @Override - public long currentSystemTimeMs() { - return currentSystemTimeMs; - } - - @Override - public StreamsMetricsImpl metrics() { - return (StreamsMetricsImpl) super.metrics(); - } - - @Override - public void forward(final Record record) { - forward(record.key(), record.value(), To.all().withTimestamp(record.timestamp())); - } - - @Override - public void forward(final Record record, final String childName) { - forward(record.key(), record.value(), To.child(childName).withTimestamp(record.timestamp())); - } - - @Override - public ProcessorRecordContext recordContext() { - return new ProcessorRecordContext(timestamp(), offset(), partition(), topic(), headers()); - } - - @Override - public Optional recordMetadata() { - return Optional.of(recordContext()); - } - - @Override - public void setRecordContext(final ProcessorRecordContext recordContext) { - setRecordMetadata( - recordContext.topic(), - recordContext.partition(), - recordContext.offset(), - recordContext.headers(), - recordContext.timestamp() - ); - } - - @Override - public void setCurrentNode(final ProcessorNode currentNode) { - this.currentNode = currentNode; - } - - @Override - public ProcessorNode currentNode() { - return currentNode; 
- } - - @Override - public ThreadCache cache() { - return null; - } - - @Override - public void initialize() {} - - @Override - public void uninitialize() {} - - @Override - public RecordCollector recordCollector() { - return recordCollector; - } - - public void setRecordCollector(final RecordCollector recordCollector) { - this.recordCollector = recordCollector; - } - - @Override - public void register(final StateStore store, - final StateRestoreCallback stateRestoreCallback) { - restoreCallbacks.put(store.name(), stateRestoreCallback); - super.register(store, stateRestoreCallback); - } - - @Override - public void register(final StateStore store, - final StateRestoreCallback stateRestoreCallback, - final CommitCallback checkpoint) { - restoreCallbacks.put(store.name(), stateRestoreCallback); - super.register(store, stateRestoreCallback); - } - - public StateRestoreCallback stateRestoreCallback(final String storeName) { - return restoreCallbacks.get(storeName); - } - - @Override - public TaskType taskType() { - return taskType; - } - - @Override - public void logChange(final String storeName, - final Bytes key, - final byte[] value, - final long timestamp, - final Position position) { - } - - @Override - public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) { - } - - @Override - public void transitionToStandby(final ThreadCache newCache) { - } - - @Override - public void registerCacheFlushListener(final String namespace, final DirtyEntryFlushListener listener) { - } - - @Override - public String changelogFor(final String storeName) { - return "mock-changelog"; - } - - @Override - public void addProcessorMetadataKeyValue(final String key, final long value) { - processorMetadata.put(key, value); - } - - @Override - public Long processorMetadataForKey(final String key) { - return processorMetadata.get(key); - } - - @Override - public void setProcessorMetadata(final ProcessorMetadata metadata) { - Objects.requireNonNull(metadata); - processorMetadata = metadata; - } - - @Override - public ProcessorMetadata processorMetadata() { - return processorMetadata; - } - - @Override - public void forward(final FixedKeyRecord record) { - forward(new Record<>(record.key(), record.value(), record.timestamp(), record.headers())); - } - - @Override - public void forward(final FixedKeyRecord record, final String childName) { - forward( - new Record<>(record.key(), record.value(), record.timestamp(), record.headers()), - childName - ); - } -} \ No newline at end of file diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala index 9a8034bac5af3..89f461a8fea89 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/Consumed.scala @@ -18,7 +18,7 @@ package org.apache.kafka.streams.scala.kstream import org.apache.kafka.common.serialization.Serde import org.apache.kafka.streams.kstream.{Consumed => ConsumedJ} -import org.apache.kafka.streams.Topology +import org.apache.kafka.streams.{AutoOffsetReset, Topology} import org.apache.kafka.streams.processor.TimestampExtractor object Consumed { @@ -36,12 +36,32 @@ object Consumed { * @param valueSerde the value serde to use. 
* @return a new instance of [[Consumed]] */ + @deprecated("Use `with` method that accepts `AutoOffsetReset` instead", "4.0.0") def `with`[K, V]( timestampExtractor: TimestampExtractor, resetPolicy: Topology.AutoOffsetReset )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = ConsumedJ.`with`(keySerde, valueSerde, timestampExtractor, resetPolicy) + /** + * Create an instance of [[Consumed]] with the supplied arguments. `null` values are acceptable. + * + * @tparam K key type + * @tparam V value type + * @param timestampExtractor the timestamp extractor to used. If `null` the default timestamp extractor from + * config will be used + * @param resetPolicy the offset reset policy to be used. If `null` the default reset policy from config + * will be used + * @param keySerde the key serde to use. + * @param valueSerde the value serde to use. + * @return a new instance of [[Consumed]] + */ + def `with`[K, V]( + timestampExtractor: TimestampExtractor, + resetPolicy: AutoOffsetReset + )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = + ConsumedJ.`with`(keySerde, valueSerde, timestampExtractor, resetPolicy) + /** * Create an instance of [[Consumed]] with key and value Serdes. * @@ -74,8 +94,22 @@ object Consumed { * @param resetPolicy the offset reset policy to be used. If `null` the default reset policy from config will be used * @return a new instance of [[Consumed]] */ + @deprecated("Use `with` method that accepts `AutoOffsetReset` instead", "4.0.0") def `with`[K, V]( resetPolicy: Topology.AutoOffsetReset )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = ConsumedJ.`with`(resetPolicy).withKeySerde(keySerde).withValueSerde(valueSerde) + + /** + * Create an instance of [[Consumed]] with a `org.apache.kafka.streams.AutoOffsetReset`. + * + * @tparam K key type + * @tparam V value type + * @param resetPolicy the offset reset policy to be used. If `null` the default reset policy from config will be used + * @return a new instance of [[Consumed]] + */ + def `with`[K, V]( + resetPolicy: AutoOffsetReset + )(implicit keySerde: Serde[K], valueSerde: Serde[V]): ConsumedJ[K, V] = + ConsumedJ.`with`(resetPolicy).withKeySerde(keySerde).withValueSerde(valueSerde) } diff --git a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala index 9db92525ced22..76918a6f742e2 100644 --- a/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala +++ b/streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/KStream.scala @@ -483,50 +483,6 @@ class KStream[K, V](val inner: KStreamJ[K, V]) { def toTable(named: Named, materialized: Materialized[K, V, ByteArrayKeyValueStore]): KTable[K, V] = new KTable(inner.toTable(named, materialized)) - /** - * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given - * `processorSupplier`). - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `Processor`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. 
- * - * @param processorSupplier a function that generates a `org.apache.kafka.streams.processor.Processor` - * @param stateStoreNames the names of the state store used by the processor - * @see `org.apache.kafka.streams.kstream.KStream#process` - */ - @deprecated(since = "3.0", message = "Use process(ProcessorSupplier, String*) instead.") - def process( - processorSupplier: () => org.apache.kafka.streams.processor.Processor[K, V], - stateStoreNames: String* - ): Unit = { - val processorSupplierJ: org.apache.kafka.streams.processor.ProcessorSupplier[K, V] = () => processorSupplier() - inner.process(processorSupplierJ, stateStoreNames: _*) - } - - /** - * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given - * `processorSupplier`). - * In order to assign a state, the state must be created and added via `addStateStore` before they can be connected - * to the `Processor`. - * It's not required to connect global state stores that are added via `addGlobalStore`; - * read-only access to global state stores is available by default. - * - * @param processorSupplier a function that generates a `org.apache.kafka.streams.processor.Processor` - * @param named a [[Named]] config used to name the processor in the topology - * @param stateStoreNames the names of the state store used by the processor - * @see `org.apache.kafka.streams.kstream.KStream#process` - */ - @deprecated(since = "3.0", message = "Use process(ProcessorSupplier, String*) instead.") - def process( - processorSupplier: () => org.apache.kafka.streams.processor.Processor[K, V], - named: Named, - stateStoreNames: String* - ): Unit = { - val processorSupplierJ: org.apache.kafka.streams.processor.ProcessorSupplier[K, V] = () => processorSupplier() - inner.process(processorSupplierJ, named, stateStoreNames: _*) - } - /** * Process all records in this stream, one record at a time, by applying a `Processor` (provided by the given * `processorSupplier`). 
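The Consumed.scala overloads added above delegate to `org.apache.kafka.streams.kstream.Consumed` factory methods that accept the new `org.apache.kafka.streams.AutoOffsetReset` type instead of the deprecated `Topology.AutoOffsetReset`. A minimal Java sketch of the migration, assuming a hypothetical topic name and String/Long serdes; the topology wiring is illustrative and not part of this patch:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.AutoOffsetReset;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class ConsumedResetPolicySketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // The Topology.AutoOffsetReset based overloads are deprecated in this patch;
        // the replacement uses the AutoOffsetReset factory methods, as in ConsumedTest.scala below.
        Consumed<String, Long> consumed = Consumed.<String, Long>with(AutoOffsetReset.latest())
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long());

        // "input-topic" is a placeholder topic name used only for illustration.
        KStream<String, Long> stream = builder.stream("input-topic", consumed);
        stream.foreach((key, value) -> System.out.println(key + " -> " + value));
    }
}
```

Note that the updated tests below compare the resolved `AutoOffsetResetStrategy.StrategyType` rather than the policy object itself.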
diff --git a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala index 0b44165164b93..4656a4d12fcd6 100644 --- a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala +++ b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/ConsumedTest.scala @@ -16,7 +16,8 @@ */ package org.apache.kafka.streams.scala.kstream -import org.apache.kafka.streams.Topology +import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy +import org.apache.kafka.streams.AutoOffsetReset import org.apache.kafka.streams.kstream.internals.ConsumedInternal import org.apache.kafka.streams.processor.FailOnInvalidTimestamp import org.apache.kafka.streams.scala.serialization.Serdes @@ -38,15 +39,15 @@ class ConsumedTest { @Test def testCreateConsumedWithTimestampExtractorAndResetPolicy(): Unit = { val timestampExtractor = new FailOnInvalidTimestamp() - val resetPolicy = Topology.AutoOffsetReset.LATEST + val resetPolicy = AutoOffsetReset.latest() val consumed: Consumed[String, Long] = - Consumed.`with`[String, Long](timestampExtractor, resetPolicy) + Consumed.`with`(timestampExtractor, resetPolicy) val internalConsumed = new ConsumedInternal(consumed) assertEquals(Serdes.stringSerde.getClass, internalConsumed.keySerde.getClass) assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) assertEquals(timestampExtractor, internalConsumed.timestampExtractor) - assertEquals(resetPolicy, internalConsumed.offsetResetPolicy) + assertEquals(AutoOffsetResetStrategy.StrategyType.LATEST, internalConsumed.offsetResetPolicy.offsetResetStrategy()) } @Test @@ -59,14 +60,15 @@ class ConsumedTest { assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) assertEquals(timestampExtractor, internalConsumed.timestampExtractor) } + @Test def testCreateConsumedWithResetPolicy(): Unit = { - val resetPolicy = Topology.AutoOffsetReset.LATEST + val resetPolicy = AutoOffsetReset.latest() val consumed: Consumed[String, Long] = Consumed.`with`[String, Long](resetPolicy) val internalConsumed = new ConsumedInternal(consumed) assertEquals(Serdes.stringSerde.getClass, internalConsumed.keySerde.getClass) assertEquals(Serdes.longSerde.getClass, internalConsumed.valueSerde.getClass) - assertEquals(resetPolicy, internalConsumed.offsetResetPolicy) + assertEquals(AutoOffsetResetStrategy.StrategyType.LATEST, internalConsumed.offsetResetPolicy.offsetResetStrategy()) } } diff --git a/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java b/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java index 24df147b21ea0..2face50ca2fc3 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/KafkaClusterTestKit.java @@ -539,15 +539,14 @@ public Map controllers() { public Controller waitForActiveController() throws InterruptedException { AtomicReference active = new AtomicReference<>(null); - TestUtils.retryOnExceptionWithTimeout(() -> { + TestUtils.waitForCondition(() -> { for (ControllerServer controllerServer : controllers.values()) { if (controllerServer.controller().isActive()) { active.set(controllerServer.controller()); } } - if (active.get() == null) - throw new RuntimeException("Controller not active"); - }); + return active.get() != null; + }, 60_000, "Controller not 
active"); return active.get(); } diff --git a/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java b/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java index f55b1be5c05da..d5f98be24b740 100644 --- a/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java +++ b/test-common/src/main/java/org/apache/kafka/common/test/TestUtils.java @@ -53,7 +53,6 @@ public class TestUtils { private static final long DEFAULT_POLL_INTERVAL_MS = 100; private static final long DEFAULT_MAX_WAIT_MS = 15_000; - private static final long DEFAULT_TIMEOUT_MS = 60_000; /** * Create an empty file in the default temporary-file directory, using `kafka` as the prefix and `tmp` as the @@ -117,40 +116,26 @@ public static void waitForCondition(final Supplier testCondition, final */ public static void waitForCondition(final Supplier testCondition, final long maxWaitMs, - String conditionDetails - ) throws InterruptedException { - retryOnExceptionWithTimeout(() -> { - String conditionDetail = conditionDetails == null ? "" : conditionDetails; - if (!testCondition.get()) - throw new TimeoutException("Condition not met within timeout " + maxWaitMs + ". " + conditionDetail); - }); - } - - /** - * Wait for the given runnable to complete successfully, i.e. throw now {@link Exception}s or - * {@link AssertionError}s, or for the given timeout to expire. If the timeout expires then the - * last exception or assertion failure will be thrown thus providing context for the failure. - * - * @param runnable the code to attempt to execute successfully. - * @throws InterruptedException if the current thread is interrupted while waiting for {@code runnable} to complete successfully. - */ - static void retryOnExceptionWithTimeout(final Runnable runnable) throws InterruptedException { - final long expectedEnd = System.currentTimeMillis() + DEFAULT_TIMEOUT_MS; + String conditionDetails) throws InterruptedException { + final long expectedEnd = System.currentTimeMillis() + maxWaitMs; while (true) { try { - runnable.run(); - return; + if (testCondition.get()) { + return; + } + String conditionDetail = conditionDetails == null ? 
"" : conditionDetails; + throw new TimeoutException("Condition not met: " + conditionDetail); } catch (final AssertionError t) { if (expectedEnd <= System.currentTimeMillis()) { throw t; } } catch (final Exception e) { if (expectedEnd <= System.currentTimeMillis()) { - throw new AssertionError(format("Assertion failed with an exception after %s ms", DEFAULT_TIMEOUT_MS), e); + throw new AssertionError(format("Assertion failed with an exception after %s ms", maxWaitMs), e); } } - Thread.sleep(DEFAULT_POLL_INTERVAL_MS); + Thread.sleep(Math.min(DEFAULT_POLL_INTERVAL_MS, maxWaitMs)); } } diff --git a/tests/kafkatest/services/verifiable_client.py b/tests/kafkatest/services/verifiable_client.py index 4971136a64e78..4a3ea5e17da0b 100644 --- a/tests/kafkatest/services/verifiable_client.py +++ b/tests/kafkatest/services/verifiable_client.py @@ -70,7 +70,7 @@ * `--group-id ` * `--topic ` * `--broker-list ` - * `--session-timeout ` + * `--session-timeout ` - note that this configuration is not supported when group protocol is consumer * `--enable-autocommit` * `--max-messages ` * `--assignment-strategy ` diff --git a/tests/kafkatest/services/verifiable_consumer.py b/tests/kafkatest/services/verifiable_consumer.py index 04500283d7831..8264566f1c2b9 100644 --- a/tests/kafkatest/services/verifiable_consumer.py +++ b/tests/kafkatest/services/verifiable_consumer.py @@ -231,10 +231,10 @@ class VerifiableConsumer(KafkaPathResolverMixin, VerifiableClientMixin, Backgrou } def __init__(self, context, num_nodes, kafka, topic, group_id, - static_membership=False, max_messages=-1, session_timeout_sec=30, enable_autocommit=False, + static_membership=False, max_messages=-1, session_timeout_sec=0, enable_autocommit=False, assignment_strategy=None, group_protocol=None, group_remote_assignor=None, version=DEV_BRANCH, stop_timeout_sec=30, log_level="INFO", jaas_override_variables=None, - on_record_consumed=None, reset_policy="earliest", verify_offsets=True): + on_record_consumed=None, reset_policy="earliest", verify_offsets=True, prop_file=""): """ :param jaas_override_variables: A dict of variables to be used in the jaas.conf template file """ @@ -251,9 +251,7 @@ def __init__(self, context, num_nodes, kafka, topic, group_id, self.session_timeout_sec = session_timeout_sec self.enable_autocommit = enable_autocommit self.assignment_strategy = assignment_strategy - self.group_protocol = group_protocol - self.group_remote_assignor = group_remote_assignor - self.prop_file = "" + self.prop_file = prop_file self.stop_timeout_sec = stop_timeout_sec self.on_record_consumed = on_record_consumed self.verify_offsets = verify_offsets @@ -417,10 +415,12 @@ def start_cmd(self, node): else: cmd += " --bootstrap-server %s" % self.kafka.bootstrap_servers(self.security_config.security_protocol) - cmd += " --reset-policy %s --group-id %s --topic %s --session-timeout %s" % \ - (self.reset_policy, self.group_id, self.topic, - self.session_timeout_sec*1000) - + cmd += " --reset-policy %s --group-id %s --topic %s" % \ + (self.reset_policy, self.group_id, self.topic) + + if self.session_timeout_sec > 0: + cmd += " --session-timeout %s" % self.session_timeout_sec*1000 + if self.max_messages > 0: cmd += " --max-messages %s" % str(self.max_messages) diff --git a/tests/kafkatest/tests/client/consumer_protocol_migration_test.py b/tests/kafkatest/tests/client/consumer_protocol_migration_test.py index 07f501fe0c69b..a03228b617a2a 100644 --- a/tests/kafkatest/tests/client/consumer_protocol_migration_test.py +++ 
b/tests/kafkatest/tests/client/consumer_protocol_migration_test.py @@ -77,7 +77,7 @@ def rolling_bounce_consumers(self, consumer, clean_shutdown=True): consumer.stop_node(node, clean_shutdown) wait_until(lambda: len(consumer.dead_nodes()) == 1, - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") consumer.start_node(node) diff --git a/tests/kafkatest/tests/client/consumer_test.py b/tests/kafkatest/tests/client/consumer_test.py index 4bd680dd2a00e..bc35cab220f6b 100644 --- a/tests/kafkatest/tests/client/consumer_test.py +++ b/tests/kafkatest/tests/client/consumer_test.py @@ -39,7 +39,7 @@ def rolling_bounce_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_ consumer.stop_node(node, clean_shutdown) wait_until(lambda: len(consumer.dead_nodes()) == 1, - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") consumer.start_node(node) @@ -89,7 +89,7 @@ def test_broker_rolling_bounce(self, metadata_quorum=quorum.zk, use_new_coordina Verify correct consumer behavior when the brokers are consecutively restarted. Setup: single Kafka cluster with one producer writing messages to a single topic with one - partition, an a set of consumers in the same group reading from the same topic. + partition, a set of consumers in the same group reading from the same topic. - Start a producer which continues producing new messages throughout the test. - Start up the consumers and wait until they've joined the group. @@ -101,15 +101,10 @@ def test_broker_rolling_bounce(self, metadata_quorum=quorum.zk, use_new_coordina partition = TopicPartition(self.TOPIC, 0) producer = self.setup_producer(self.TOPIC) - # The consumers' session timeouts must exceed the time it takes for a broker to roll. Consumers are likely - # to see cluster metadata consisting of just a single alive broker in the case where the cluster has just 2 - # brokers and the cluster is rolling (which is what is happening here). When the consumer sees a single alive - # broker, and then that broker rolls, the consumer will be unable to connect to the cluster until that broker - # completes its roll. In the meantime, the consumer group will move to the group coordinator on the other - # broker, and that coordinator will fail the consumer and trigger a group rebalance if its session times out. - # This test is asserting that no rebalances occur, so we increase the session timeout for this to be the case. - self.session_timeout_sec = 30 - consumer = self.setup_consumer(self.TOPIC, group_protocol=group_protocol) + # Due to KIP-899, rebootstrap is performed when there are no available brokers in the current metadata. + # We disable rebootstrapping by setting `metadata.recovery.strategy=none` for the consumer, as the test expects no metadata changes.
+ # see KAFKA-18194 + consumer = self.setup_consumer(self.TOPIC, group_protocol=group_protocol, prop_file="metadata.recovery.strategy=none") producer.start() self.await_produced_messages(producer) @@ -229,7 +224,6 @@ def test_static_consumer_bounce_with_eager_assignment(self, clean_shutdown, stat producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=static_membership, group_protocol=group_protocol, assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor") @@ -295,7 +289,6 @@ def test_static_consumer_persisted_after_rejoin(self, bounce_mode, metadata_quor producer = self.setup_producer(self.TOPIC) producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=True, group_protocol=group_protocol) consumer.start() self.await_all_members(consumer) @@ -340,7 +333,6 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me producer.start() self.await_produced_messages(producer) - self.session_timeout_sec = 60 consumer = self.setup_consumer(self.TOPIC, static_membership=True, group_protocol=group_protocol) self.num_consumers = num_conflict_consumers @@ -372,7 +364,7 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me # Stop existing nodes, so conflicting ones should be able to join. consumer.stop_all() wait_until(lambda: len(consumer.dead_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for the consumer to shutdown") conflict_consumer.start() self.await_members(conflict_consumer, num_conflict_consumers) @@ -383,13 +375,13 @@ def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage, me conflict_consumer.start() wait_until(lambda: len(consumer.joined_nodes()) + len(conflict_consumer.joined_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec*2, + timeout_sec=60, err_msg="Timed out waiting for consumers to join, expected total %d joined, but only see %d joined from " "normal consumer group and %d from conflict consumer group" % \ (len(consumer.nodes), len(consumer.joined_nodes()), len(conflict_consumer.joined_nodes())) ) wait_until(lambda: len(consumer.dead_nodes()) + len(conflict_consumer.dead_nodes()) == len(conflict_consumer.nodes), - timeout_sec=self.session_timeout_sec*2, + timeout_sec=60, err_msg="Timed out waiting for fenced consumers to die, expected total %d dead, but only see %d dead in " "normal consumer group and %d dead in conflict consumer group" % \ (len(conflict_consumer.nodes), len(consumer.dead_nodes()), len(conflict_consumer.dead_nodes())) @@ -427,7 +419,7 @@ def test_consumer_failure(self, clean_shutdown, enable_autocommit, metadata_quor # stop the partition owner and await its shutdown consumer.kill_node(partition_owner, clean_shutdown=clean_shutdown) wait_until(lambda: len(consumer.joined_nodes()) == (self.num_consumers - 1) and consumer.owner(partition) is not None, - timeout_sec=self.session_timeout_sec*2+5, + timeout_sec=60, err_msg="Timed out waiting for consumer to close") # ensure that the remaining consumer does some work after rebalancing diff --git a/tests/kafkatest/tests/client/pluggable_test.py b/tests/kafkatest/tests/client/pluggable_test.py index b2f726e016303..8f74ec1c8a52d 100644 --- a/tests/kafkatest/tests/client/pluggable_test.py +++ b/tests/kafkatest/tests/client/pluggable_test.py @@ -52,5 +52,5 @@ def 
test_start_stop(self, metadata_quorum=quorum.zk): self.logger.debug("Waiting for %d nodes to stop" % len(consumer.nodes)) wait_until(lambda: len(consumer.dead_nodes()) == len(consumer.nodes), - timeout_sec=self.session_timeout_sec+5, + timeout_sec=60, err_msg="Timed out waiting for consumers to shutdown") diff --git a/tests/kafkatest/tests/verifiable_consumer_test.py b/tests/kafkatest/tests/verifiable_consumer_test.py index 08da754732d46..5353a6e82af54 100644 --- a/tests/kafkatest/tests/verifiable_consumer_test.py +++ b/tests/kafkatest/tests/verifiable_consumer_test.py @@ -24,13 +24,12 @@ class VerifiableConsumerTest(KafkaTest): PRODUCER_REQUEST_TIMEOUT_SEC = 30 def __init__(self, test_context, num_consumers=1, num_producers=0, - group_id="test_group_id", session_timeout_sec=10, **kwargs): + group_id="test_group_id", **kwargs): super(VerifiableConsumerTest, self).__init__(test_context, **kwargs) self.num_consumers = num_consumers self.num_producers = num_producers self.group_id = group_id - self.session_timeout_sec = session_timeout_sec - self.consumption_timeout_sec = max(self.PRODUCER_REQUEST_TIMEOUT_SEC + 5, 2 * session_timeout_sec) + self.consumption_timeout_sec = self.PRODUCER_REQUEST_TIMEOUT_SEC + 5 def _all_partitions(self, topic, num_partitions): partitions = set() @@ -56,7 +55,7 @@ def min_cluster_size(self): def setup_consumer(self, topic, static_membership=False, enable_autocommit=False, assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor", group_remote_assignor="range", **kwargs): return VerifiableConsumer(self.test_context, self.num_consumers, self.kafka, - topic, self.group_id, static_membership=static_membership, session_timeout_sec=self.session_timeout_sec, + topic, self.group_id, static_membership=static_membership, assignment_strategy=assignment_strategy, enable_autocommit=enable_autocommit, group_remote_assignor=group_remote_assignor, log_level="TRACE", **kwargs) @@ -81,9 +80,9 @@ def await_consumed_messages(self, consumer, min_messages=1): def await_members(self, consumer, num_consumers): # Wait until all members have joined the group wait_until(lambda: len(consumer.joined_nodes()) == num_consumers, - timeout_sec=self.session_timeout_sec*2, + timeout_sec=60, err_msg="Consumers failed to join in a reasonable amount of time") - + def await_all_members(self, consumer): self.await_members(consumer, self.num_consumers) diff --git a/tools/src/main/java/org/apache/kafka/tools/ClusterTool.java b/tools/src/main/java/org/apache/kafka/tools/ClusterTool.java index f4699d221e20d..370d756d493d7 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ClusterTool.java +++ b/tools/src/main/java/org/apache/kafka/tools/ClusterTool.java @@ -17,6 +17,8 @@ package org.apache.kafka.tools; import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.DescribeClusterOptions; +import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.utils.Exit; import org.apache.kafka.common.utils.Utils; @@ -31,11 +33,13 @@ import java.io.PrintStream; import java.util.Arrays; +import java.util.Collection; import java.util.Optional; import java.util.Properties; import java.util.concurrent.ExecutionException; import static net.sourceforge.argparse4j.impl.Arguments.store; +import static net.sourceforge.argparse4j.impl.Arguments.storeTrue; public class ClusterTool { @@ -68,7 +72,9 @@ static void execute(String... 
args) throws Exception { .help("Get information about the ID of a cluster."); Subparser unregisterParser = subparsers.addParser("unregister") .help("Unregister a broker."); - for (Subparser subpparser : Arrays.asList(clusterIdParser, unregisterParser)) { + Subparser listEndpoints = subparsers.addParser("list-endpoints") + .help("List endpoints"); + for (Subparser subpparser : Arrays.asList(clusterIdParser, unregisterParser, listEndpoints)) { MutuallyExclusiveGroup connectionOptions = subpparser.addMutuallyExclusiveGroup().required(true); connectionOptions.addArgument("--bootstrap-server", "-b") .action(store()) @@ -85,6 +91,9 @@ static void execute(String... args) throws Exception { .action(store()) .required(true) .help("The ID of the broker to unregister."); + listEndpoints.addArgument("--include-fenced-brokers") + .action(storeTrue()) + .help("Whether to include fenced brokers when listing broker endpoints"); Namespace namespace = parser.parseArgsOrFail(args); String command = namespace.getString("command"); @@ -108,6 +117,17 @@ static void execute(String... args) throws Exception { } break; } + case "list-endpoints": { + try (Admin adminClient = Admin.create(properties)) { + boolean includeFencedBrokers = Optional.of(namespace.getBoolean("include_fenced_brokers")).orElse(false); + boolean listControllerEndpoints = namespace.getString("bootstrap_controller") != null; + if (includeFencedBrokers && listControllerEndpoints) { + throw new IllegalArgumentException("The option --include-fenced-brokers is only supported with --bootstrap-server option"); + } + listEndpoints(System.out, adminClient, listControllerEndpoints, includeFencedBrokers); + } + break; + } default: throw new RuntimeException("Unknown command " + command); } @@ -135,4 +155,44 @@ static void unregisterCommand(PrintStream stream, Admin adminClient, int id) thr } } } + + static void listEndpoints(PrintStream stream, Admin adminClient, boolean listControllerEndpoints, boolean includeFencedBrokers) throws Exception { + try { + DescribeClusterOptions option = new DescribeClusterOptions().includeFencedBrokers(includeFencedBrokers); + Collection nodes = adminClient.describeCluster(option).nodes().get(); + + String maxHostLength = String.valueOf(nodes.stream().map(node -> node.host().length()).max(Integer::compareTo).orElse(100)); + String maxRackLength = String.valueOf(nodes.stream().filter(node -> node.hasRack()).map(node -> node.rack().length()).max(Integer::compareTo).orElse(10)); + + if (listControllerEndpoints) { + String format = "%-10s %-" + maxHostLength + "s %-10s %-" + maxRackLength + "s %-15s%n"; + stream.printf(format, "ID", "HOST", "PORT", "RACK", "ENDPOINT_TYPE"); + nodes.stream().forEach(node -> stream.printf(format, + node.idString(), + node.host(), + node.port(), + node.rack(), + "controller" + )); + } else { + String format = "%-10s %-" + maxHostLength + "s %-10s %-" + maxRackLength + "s %-10s %-15s%n"; + stream.printf(format, "ID", "HOST", "PORT", "RACK", "STATE", "ENDPOINT_TYPE"); + nodes.stream().forEach(node -> stream.printf(format, + node.idString(), + node.host(), + node.port(), + node.rack(), + node.isFenced() ? 
"fenced" : "unfenced", + "broker" + )); + } + } catch (ExecutionException ee) { + Throwable cause = ee.getCause(); + if (cause instanceof UnsupportedVersionException) { + stream.println(ee.getCause().getMessage()); + } else { + throw ee; + } + } + } } diff --git a/tools/src/main/java/org/apache/kafka/tools/VerifiableConsumer.java b/tools/src/main/java/org/apache/kafka/tools/VerifiableConsumer.java index 0436e0d85e8f4..825e5ed2c6ec0 100644 --- a/tools/src/main/java/org/apache/kafka/tools/VerifiableConsumer.java +++ b/tools/src/main/java/org/apache/kafka/tools/VerifiableConsumer.java @@ -574,11 +574,10 @@ private static ArgumentParser argParser() { parser.addArgument("--session-timeout") .action(store()) .required(false) - .setDefault(30000) .type(Integer.class) .metavar("TIMEOUT_MS") .dest("sessionTimeout") - .help("Set the consumer's session timeout"); + .help("Set the consumer's session timeout, note that this configuration is not supported when group protocol is consumer"); parser.addArgument("--verbose") .action(storeTrue()) @@ -649,10 +648,15 @@ public static VerifiableConsumer createFromArgs(ArgumentParser parser, String[] if (groupRemoteAssignor != null) consumerProps.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, groupRemoteAssignor); } else { - // This means we're using the old consumer group protocol. + // This means we're using the CLASSIC consumer group protocol. consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, res.getString("assignmentStrategy")); } + Integer sessionTimeout = res.getInt("sessionTimeout"); + if (sessionTimeout != null) { + consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(sessionTimeout)); + } + consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, res.getString("groupId")); String groupInstanceId = res.getString("groupInstanceId"); @@ -664,7 +668,6 @@ public static VerifiableConsumer createFromArgs(ArgumentParser parser, String[] consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, useAutoCommit); consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, res.getString("resetPolicy")); - consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(res.getInt("sessionTimeout"))); StringDeserializer deserializer = new StringDeserializer(); KafkaConsumer consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer); diff --git a/tools/src/test/java/org/apache/kafka/tools/AbstractResetIntegrationTest.java b/tools/src/test/java/org/apache/kafka/tools/AbstractResetIntegrationTest.java index 9d0f030b5b191..b0b99d0563802 100644 --- a/tools/src/test/java/org/apache/kafka/tools/AbstractResetIntegrationTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/AbstractResetIntegrationTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.GroupProtocol; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.config.SslConfigs; import org.apache.kafka.common.config.types.Password; @@ -127,6 +128,7 @@ private void prepareConfigs(final String appID) { resultConsumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); resultConsumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); resultConsumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class); + resultConsumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, 
GroupProtocol.CLASSIC.name()); resultConsumerConfig.putAll(commonClientConfig); streamsConfig = new Properties(); @@ -406,6 +408,7 @@ protected boolean tryCleanGlobal(final boolean withIntermediateTopics, final Properties cleanUpConfig = new Properties(); cleanUpConfig.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 100); cleanUpConfig.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(CLEANUP_CONSUMER_TIMEOUT)); + cleanUpConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name); return new StreamsResetter().execute(parameters, cleanUpConfig) == 0; } diff --git a/tools/src/test/java/org/apache/kafka/tools/BrokerApiVersionsCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/BrokerApiVersionsCommandTest.java index 74919740170d0..aac6a3f48ffa9 100644 --- a/tools/src/test/java/org/apache/kafka/tools/BrokerApiVersionsCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/BrokerApiVersionsCommandTest.java @@ -54,7 +54,7 @@ public void testBrokerApiVersionsCommandOutput(ClusterInstance clusterInstance) BrokerApiVersionsCommand.mainNoExit("--bootstrap-server", clusterInstance.bootstrapServers())); Iterator lineIter = Arrays.stream(output.split("\n")).iterator(); assertTrue(lineIter.hasNext()); - assertEquals(clusterInstance.bootstrapServers() + " (id: 0 rack: null) -> (", lineIter.next()); + assertEquals(clusterInstance.bootstrapServers() + " (id: 0 rack: null isFenced: false) -> (", lineIter.next()); ApiMessageType.ListenerType listenerType = ApiMessageType.ListenerType.BROKER; diff --git a/tools/src/test/java/org/apache/kafka/tools/ClusterToolTest.java b/tools/src/test/java/org/apache/kafka/tools/ClusterToolTest.java index d27839269f469..b21795411e269 100644 --- a/tools/src/test/java/org/apache/kafka/tools/ClusterToolTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/ClusterToolTest.java @@ -29,8 +29,11 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.util.Arrays; +import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -61,6 +64,34 @@ public void testUnregister(ClusterInstance clusterInstance) { assertTrue(output.contains("Broker " + brokerId + " is no longer registered.")); } + @ClusterTest(brokers = 1, types = {Type.KRAFT, Type.CO_KRAFT}) + public void testListEndpointsWithBootstrapServer(ClusterInstance clusterInstance) { + String output = ToolsTestUtils.captureStandardOut(() -> + assertDoesNotThrow(() -> ClusterTool.execute("list-endpoints", "--bootstrap-server", clusterInstance.bootstrapServers()))); + String port = clusterInstance.bootstrapServers().split(":")[1]; + int id = clusterInstance.brokerIds().iterator().next(); + String format = "%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-6s"; + String expected = String.format(format, "ID", "HOST", "PORT", "RACK", "STATE", "ENDPOINT_TYPE", id, "localhost", port, "null", "unfenced", "broker"); + assertEquals(expected, output); + } + + @ClusterTest(brokers = 2, types = {Type.KRAFT, Type.CO_KRAFT}) + public void testListEndpointsArgumentWithBootstrapServer(ClusterInstance clusterInstance) { + List brokerIds = clusterInstance.brokerIds().stream().collect(Collectors.toList()); + clusterInstance.shutdownBroker(brokerIds.get(0)); + + List ports = Arrays.stream(clusterInstance.bootstrapServers().split(",")).map(b -> 
b.split(":")[1]).collect(Collectors.toList()); + String format = "%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-6s"; + String expected = String.format(format, + "ID", "HOST", "PORT", "RACK", "STATE", "ENDPOINT_TYPE", + brokerIds.get(0), "localhost", ports.get(0), "null", "fenced", "broker", + brokerIds.get(1), "localhost", ports.get(1), "null", "unfenced", "broker"); + + String output = ToolsTestUtils.captureStandardOut(() -> assertDoesNotThrow(() -> ClusterTool.execute("list-endpoints", "--bootstrap-server", clusterInstance.bootstrapServers(), "--include-fenced-brokers"))); + + assertEquals(expected, output); + } + @ClusterTest(types = {Type.KRAFT, Type.CO_KRAFT}) public void testClusterIdWithBootstrapController(ClusterInstance clusterInstance) { String output = ToolsTestUtils.captureStandardOut(() -> @@ -83,6 +114,25 @@ public void testUnregisterWithBootstrapController(ClusterInstance clusterInstanc "the controller quorum.", exception.getCause().getMessage()); } + @ClusterTest(brokers = 3, types = {Type.KRAFT, Type.CO_KRAFT}) + public void testListEndpointsWithBootstrapController(ClusterInstance clusterInstance) { + String output = ToolsTestUtils.captureStandardOut(() -> + assertDoesNotThrow(() -> ClusterTool.execute("list-endpoints", "--bootstrap-controller", clusterInstance.bootstrapControllers()))); + String port = clusterInstance.bootstrapControllers().split(":")[1]; + int id = clusterInstance.controllerIds().iterator().next(); + String format = "%-10s %-9s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s"; + String expected = String.format(format, "ID", "HOST", "PORT", "RACK", "ENDPOINT_TYPE", id, "localhost", port, "null", "controller"); + assertTrue(output.equals(expected)); + } + + @ClusterTest(brokers = 3, types = {Type.KRAFT, Type.CO_KRAFT}) + public void testListEndpointsArgumentWithBootstrapController(ClusterInstance clusterInstance) { + RuntimeException exception = + assertThrows(RuntimeException.class, + () -> ClusterTool.execute("list-endpoints", "--bootstrap-controller", clusterInstance.bootstrapControllers(), "--include-fenced-brokers")); + assertEquals("The option --include-fenced-brokers is only supported with --bootstrap-server option", exception.getMessage()); + } + @Test public void testPrintClusterId() throws Exception { Admin adminClient = new MockAdminClient.Builder(). 
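The list-endpoints additions to ClusterTool and its tests above reduce to a single `Admin#describeCluster` call with the new `includeFencedBrokers` option. A minimal sketch of that call, assuming a reachable broker at localhost:9092; the address and the output formatting are illustrative, not the tool's exact column layout:

```java
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.common.Node;

import java.util.Collection;
import java.util.Properties;

public class ListEndpointsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Hypothetical bootstrap address; replace with a real broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // includeFencedBrokers(true) corresponds to the new --include-fenced-brokers flag.
            DescribeClusterOptions options = new DescribeClusterOptions().includeFencedBrokers(true);
            Collection<Node> nodes = admin.describeCluster(options).nodes().get();
            for (Node node : nodes) {
                System.out.printf("%s %s:%d rack=%s state=%s%n",
                        node.idString(), node.host(), node.port(), node.rack(),
                        node.isFenced() ? "fenced" : "unfenced");
            }
        }
    }
}
```

The flag is rejected when combined with `--bootstrap-controller`, which is what `testListEndpointsArgumentWithBootstrapController` asserts.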
diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java index 7a134ac0c9610..c292aa1c845d0 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java @@ -103,6 +103,7 @@ public void testAdminRequestsForDescribeOffsets() throws Exception { } @Test + @SuppressWarnings("deprecation") public void testAdminRequestsForDescribeNegativeOffsets() throws Exception { String[] args = new String[]{"--bootstrap-server", "localhost:9092", "--group", GROUP, "--describe", "--offsets"}; ConsumerGroupCommand.ConsumerGroupService groupService = consumerGroupService(args); @@ -232,6 +233,7 @@ protected Admin createAdminClient(Map configOverrides) { }; } + @SuppressWarnings("deprecation") private DescribeConsumerGroupsResult describeGroupsResult(GroupState groupState) { MemberDescription member1 = new MemberDescription("member1", Optional.of("instance1"), "client1", "host1", null); ConsumerGroupDescription description = new ConsumerGroupDescription(GROUP, diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentClient.java b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentClient.java index 91dd6ee1d9787..fb3a7d8162ffd 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentClient.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentClient.java @@ -49,7 +49,7 @@ import java.util.List; import java.util.Map; -import javax.ws.rs.core.UriBuilder; +import jakarta.ws.rs.core.UriBuilder; import static net.sourceforge.argparse4j.impl.Arguments.store; import static net.sourceforge.argparse4j.impl.Arguments.storeTrue; diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentRestResource.java b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentRestResource.java index 09f5d5bb3ebb9..b8b209d974ee6 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentRestResource.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/agent/AgentRestResource.java @@ -25,17 +25,17 @@ import java.util.concurrent.atomic.AtomicReference; -import javax.servlet.ServletContext; -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; +import jakarta.servlet.ServletContext; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; /** * The REST resource for the Agent. This describes the RPCs which the agent can accept. 
@@ -54,7 +54,7 @@ public class AgentRestResource { private final AtomicReference agent = new AtomicReference<>(null); - @javax.ws.rs.core.Context + @jakarta.ws.rs.core.Context private ServletContext context; public void setAgent(Agent myAgent) { diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorClient.java b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorClient.java index 00c26adc4c4c8..84880fdd86ffc 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorClient.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorClient.java @@ -63,8 +63,8 @@ import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; -import javax.ws.rs.NotFoundException; -import javax.ws.rs.core.UriBuilder; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.core.UriBuilder; import static net.sourceforge.argparse4j.impl.Arguments.append; import static net.sourceforge.argparse4j.impl.Arguments.store; diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorRestResource.java b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorRestResource.java index a65c4d26a73a1..5d0ad96e17f23 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorRestResource.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/coordinator/CoordinatorRestResource.java @@ -33,20 +33,20 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicReference; -import javax.servlet.ServletContext; -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.NotFoundException; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; +import jakarta.servlet.ServletContext; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.DefaultValue; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.QueryParam; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; /** * The REST resource for the Coordinator. 
This describes the RPCs which the coordinator @@ -66,7 +66,7 @@ public class CoordinatorRestResource { private final AtomicReference coordinator = new AtomicReference<>(); - @javax.ws.rs.core.Context + @jakarta.ws.rs.core.Context private ServletContext context; public void setCoordinator(Coordinator myCoordinator) { diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/JsonRestServer.java b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/JsonRestServer.java index 44e69ee2dc77f..108642ca1052c 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/JsonRestServer.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/JsonRestServer.java @@ -21,20 +21,19 @@ import org.apache.kafka.trogdor.common.JsonUtil; import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; +import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider; +import org.eclipse.jetty.ee10.servlet.ServletContextHandler; +import org.eclipse.jetty.ee10.servlet.ServletHolder; import org.eclipse.jetty.server.Connector; import org.eclipse.jetty.server.CustomRequestLog; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.server.Slf4jRequestLogWriter; +import org.eclipse.jetty.server.handler.ContextHandlerCollection; import org.eclipse.jetty.server.handler.DefaultHandler; -import org.eclipse.jetty.server.handler.HandlerCollection; -import org.eclipse.jetty.server.handler.RequestLogHandler; import org.eclipse.jetty.server.handler.StatisticsHandler; -import org.eclipse.jetty.servlet.ServletContextHandler; -import org.eclipse.jetty.servlet.ServletHolder; import org.glassfish.jersey.server.ResourceConfig; import org.glassfish.jersey.servlet.ServletContainer; import org.slf4j.Logger; @@ -102,14 +101,13 @@ public void start(Object... 
resources) { context.setContextPath("/"); context.addServlet(servletHolder, "/*"); - RequestLogHandler requestLogHandler = new RequestLogHandler(); Slf4jRequestLogWriter slf4jRequestLogWriter = new Slf4jRequestLogWriter(); slf4jRequestLogWriter.setLoggerName(JsonRestServer.class.getCanonicalName()); CustomRequestLog requestLog = new CustomRequestLog(slf4jRequestLogWriter, CustomRequestLog.EXTENDED_NCSA_FORMAT + " %{ms}T"); - requestLogHandler.setRequestLog(requestLog); + jettyServer.setRequestLog(requestLog); - HandlerCollection handlers = new HandlerCollection(); - handlers.setHandlers(new Handler[]{context, new DefaultHandler(), requestLogHandler}); + ContextHandlerCollection handlers = new ContextHandlerCollection(); + handlers.setHandlers(new Handler[]{context, new DefaultHandler()}); StatisticsHandler statsHandler = new StatisticsHandler(); statsHandler.setHandler(handlers); jettyServer.setHandler(statsHandler); diff --git a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/RestExceptionMapper.java b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/RestExceptionMapper.java index 2c1a046b480bf..db747030abe47 100644 --- a/trogdor/src/main/java/org/apache/kafka/trogdor/rest/RestExceptionMapper.java +++ b/trogdor/src/main/java/org/apache/kafka/trogdor/rest/RestExceptionMapper.java @@ -25,9 +25,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.ws.rs.NotFoundException; -import javax.ws.rs.core.Response; -import javax.ws.rs.ext.ExceptionMapper; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.core.Response; +import jakarta.ws.rs.ext.ExceptionMapper; public class RestExceptionMapper implements ExceptionMapper { private static final Logger log = LoggerFactory.getLogger(RestExceptionMapper.class); diff --git a/trogdor/src/test/java/org/apache/kafka/trogdor/coordinator/CoordinatorTest.java b/trogdor/src/test/java/org/apache/kafka/trogdor/coordinator/CoordinatorTest.java index 849e2713d9e85..313a5db743741 100644 --- a/trogdor/src/test/java/org/apache/kafka/trogdor/coordinator/CoordinatorTest.java +++ b/trogdor/src/test/java/org/apache/kafka/trogdor/coordinator/CoordinatorTest.java @@ -62,7 +62,7 @@ import java.util.List; import java.util.Optional; -import javax.ws.rs.NotFoundException; +import jakarta.ws.rs.NotFoundException; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/trogdor/src/test/java/org/apache/kafka/trogdor/rest/RestExceptionMapperTest.java b/trogdor/src/test/java/org/apache/kafka/trogdor/rest/RestExceptionMapperTest.java index e446fec68b701..c0f1248ce2257 100644 --- a/trogdor/src/test/java/org/apache/kafka/trogdor/rest/RestExceptionMapperTest.java +++ b/trogdor/src/test/java/org/apache/kafka/trogdor/rest/RestExceptionMapperTest.java @@ -26,8 +26,8 @@ import org.junit.jupiter.api.Test; -import javax.ws.rs.NotFoundException; -import javax.ws.rs.core.Response; +import jakarta.ws.rs.NotFoundException; +import jakarta.ws.rs.core.Response; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows;
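
A note on the first hunk above: pinning ConsumerConfig.GROUP_PROTOCOL_CONFIG to GroupProtocol.CLASSIC keeps the reset tool's cleanup consumer on the classic rebalance protocol. The following standalone sketch shows the same pinning on an ordinary consumer; it is illustrative only, and the bootstrap address, group id, and deserializers are placeholders rather than anything taken from the patch.

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.GroupProtocol;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Properties;

public class ClassicProtocolConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Opt out of the new consumer group protocol, mirroring the test change above.
        props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // subscribe() and poll() as usual; the coordinator treats this member as a classic-protocol member.
        }
    }
}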
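
The new ClusterToolTest cases build their expected list-endpoints output out of fixed-width format strings. This small sketch, with made-up id/host/port values, shows how the %-Ns specifiers produce the left-aligned header and broker rows that the assertions compare against; it is not part of the patch.

public class EndpointTableFormatSketch {
    public static void main(String[] args) {
        // Left-justified, fixed-width columns; %n separates the header row from the data row,
        // matching the layout asserted in testListEndpointsWithBootstrapServer.
        String format = "%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-6s";
        String table = String.format(format,
                "ID", "HOST", "PORT", "RACK", "STATE", "ENDPOINT_TYPE",
                0, "localhost", 9092, "null", "unfenced", "broker");
        // Prints a two-line table: the column headers, then one broker row, with no trailing newline.
        System.out.print(table);
    }
}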
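
The trogdor changes are a mechanical javax.servlet / javax.ws.rs to jakarta.servlet / jakarta.ws.rs namespace migration. As a minimal sketch of what a resource looks like on the jakarta packages, here is an illustrative class using the same annotations AgentRestResource now imports; the class name and path are invented for the example and are not part of the patch.

import jakarta.servlet.ServletContext;
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.Context;
import jakarta.ws.rs.core.MediaType;

@Path("/status")
@Produces(MediaType.APPLICATION_JSON)
public class StatusResource {
    @Context
    private ServletContext servletContext; // injected by the JAX-RS runtime, as in the trogdor resources

    @GET
    public String status() {
        return "{\"state\":\"running\"}";
    }
}

Only the package prefix changes relative to the javax-based code; the annotation names and their semantics stay the same.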
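
The JsonRestServer hunk also tracks the Jetty 12 / EE10 API: ServletContextHandler and ServletHolder move to org.eclipse.jetty.ee10.servlet, the request log is set directly on the Server rather than through a RequestLogHandler, and HandlerCollection gives way to ContextHandlerCollection. The sketch below condenses that wiring into a standalone server under the same assumptions as the patched class (Jetty 12 EE10 plus Jersey's jakarta ServletContainer); the port and the commented-out resource registration are placeholders, and this is an approximation of the pattern rather than the trogdor class itself.

import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider;

import org.eclipse.jetty.ee10.servlet.ServletContextHandler;
import org.eclipse.jetty.ee10.servlet.ServletHolder;
import org.eclipse.jetty.server.CustomRequestLog;
import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.Slf4jRequestLogWriter;
import org.eclipse.jetty.server.handler.ContextHandlerCollection;
import org.eclipse.jetty.server.handler.DefaultHandler;
import org.eclipse.jetty.server.handler.StatisticsHandler;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;

public class JakartaRestServerSketch {
    public static void main(String[] args) throws Exception {
        Server server = new Server();
        ServerConnector connector = new ServerConnector(server);
        connector.setPort(8080); // placeholder port
        server.addConnector(connector);

        ResourceConfig resourceConfig = new ResourceConfig();
        resourceConfig.register(new JacksonJsonProvider()); // jakarta-rs Jackson provider, as in JsonRestServer
        // resourceConfig.register(new StatusResource());  // register JAX-RS resources here (placeholder)

        ServletContextHandler context = new ServletContextHandler();
        context.setContextPath("/");
        context.addServlet(new ServletHolder(new ServletContainer(resourceConfig)), "/*");

        // Jetty 12 drops RequestLogHandler; the request log now hangs off the Server itself.
        server.setRequestLog(new CustomRequestLog(new Slf4jRequestLogWriter(), CustomRequestLog.EXTENDED_NCSA_FORMAT));

        ContextHandlerCollection handlers = new ContextHandlerCollection();
        handlers.setHandlers(new Handler[]{context, new DefaultHandler()});
        StatisticsHandler statsHandler = new StatisticsHandler();
        statsHandler.setHandler(handlers);
        server.setHandler(statsHandler);

        server.start();
        server.join();
    }
}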